Dataset columns:
repo_id: string (length 5 to 115)
size: int64 (590 to 5.01M)
file_path: string (length 4 to 212)
content: string (length 590 to 5.01M)
mktmansour/MKT-KSA-Geolocation-Security
2,712
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864/avx2/consts.S
#include "namespace.h" #if defined(__APPLE__) #define ASM_HIDDEN .private_extern #else #define ASM_HIDDEN .hidden #endif #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) .data ASM_HIDDEN MASK0_0 ASM_HIDDEN MASK0_1 ASM_HIDDEN MASK1_0 ASM_HIDDEN MASK1_1 ASM_HIDDEN MASK2_0 ASM_HIDDEN MASK2_1 ASM_HIDDEN MASK3_0 ASM_HIDDEN MASK3_1 ASM_HIDDEN MASK4_0 ASM_HIDDEN MASK4_1 ASM_HIDDEN MASK5_0 ASM_HIDDEN MASK5_1 .globl MASK0_0 .globl MASK0_1 .globl MASK1_0 .globl MASK1_1 .globl MASK2_0 .globl MASK2_1 .globl MASK3_0 .globl MASK3_1 .globl MASK4_0 .globl MASK4_1 .globl MASK5_0 .globl MASK5_1 .p2align 5 MASK0_0: .quad 0x5555555555555555, 0x5555555555555555, 0x5555555555555555, 0x5555555555555555 MASK0_1: .quad 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA MASK1_0: .quad 0x3333333333333333, 0x3333333333333333, 0x3333333333333333, 0x3333333333333333 MASK1_1: .quad 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC MASK2_0: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F MASK2_1: .quad 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0 MASK3_0: .quad 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF MASK3_1: .quad 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00 MASK4_0: .quad 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF MASK4_1: .quad 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000 MASK5_0: .quad 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF MASK5_1: .quad 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000
mktmansour/MKT-KSA-Geolocation-Security
8,801
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864/avx2/update_asm.S
#include "namespace.h" #define update_asm CRYPTO_NAMESPACE(update_asm) #define _update_asm _CRYPTO_NAMESPACE(update_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 s0 # qhasm: int64 s1 # qhasm: enter update_asm .p2align 5 .global _update_asm .global update_asm _update_asm: update_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: s1 = input_1 # asm 1: mov <input_1=int64#2,>s1=int64#2 # asm 2: mov <input_1=%rsi,>s1=%rsi mov % rsi, % rsi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd 
$1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add 
<input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: return add % r11, % rsp ret
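update_asm is twelve copies of one five-instruction block: load a 64-bit limb, shift it right by one with shrd so the next bit of the incoming word lands in bit 63, store it back, and step the pointer by a caller-supplied byte stride. A rough plain-C equivalent (helper name and the uint64_t stride unit are our reading of the asm; the limb count of 12 matches GFBITS for mceliece348864):

#include <stddef.h>
#include <stdint.h>

/* buf holds 12 bitsliced limbs spaced `stride` uint64_t apart;
   `in` supplies one fresh bit per limb, consumed low bit first. */
static void update_sketch(uint64_t *buf, uint64_t in, size_t stride)
{
    for (int b = 0; b < 12; b++) {
        uint64_t s0 = buf[b * stride];
        s0 = (s0 >> 1) | (in << 63);  /* shrd $1,%rsi,%rcx */
        in >>= 1;                     /* shr $1,%rsi */
        buf[b * stride] = s0;
    }
}

In effect this shifts each bitsliced column of a 64-deep window right by one position while injecting one new element at the top, which is the per-step update of a sliding evaluation window.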
mktmansour/MKT-KSA-Geolocation-Security
40,155
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864/avx2/vec128_mul_asm.S
#include "namespace.h" #define vec128_mul_asm CRYPTO_NAMESPACE(vec128_mul_asm) #define _vec128_mul_asm _CRYPTO_NAMESPACE(vec128_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 b6 # qhasm: reg256 b7 # qhasm: reg256 b8 # qhasm: reg256 b9 # qhasm: reg256 b10 # qhasm: reg256 b11 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r # qhasm: reg128 h0 # qhasm: reg128 h1 # qhasm: reg128 h2 # qhasm: reg128 h3 # qhasm: reg128 h4 # qhasm: reg128 h5 # qhasm: reg128 h6 # qhasm: reg128 h7 # qhasm: reg128 h8 # qhasm: reg128 h9 # qhasm: reg128 h10 # qhasm: reg128 h11 # qhasm: reg128 h12 # qhasm: reg128 h13 # qhasm: reg128 h14 # qhasm: reg128 h15 # qhasm: reg128 h16 # qhasm: reg128 h17 # qhasm: reg128 h18 # qhasm: reg128 h19 # qhasm: reg128 h20 # qhasm: reg128 h21 # qhasm: reg128 h22 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: enter vec128_mul_asm .p2align 5 .global _vec128_mul_asm .global vec128_mul_asm _vec128_mul_asm: vec128_mul_asm: mov % rsp, % r11 and $31, % r11 add $608, % r11 sub % r11, % rsp # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>ptr=%rcx leaq 0( % rsp), % rcx # qhasm: b11 = mem128[ input_2 + 176 ] x2 # asm 1: vbroadcasti128 176(<input_2=int64#3), >b11=reg256#1 # asm 2: vbroadcasti128 176(<input_2=%rdx), >b11=%ymm0 vbroadcasti128 176( % rdx), % ymm0 # qhasm: a5[0] = mem128[ input_1 + 80 ] # asm 1: vinsertf128 $0x0,80(<input_1=int64#2),<a5=reg256#2,<a5=reg256#2 # asm 2: vinsertf128 $0x0,80(<input_1=%rsi),<a5=%ymm1,<a5=%ymm1 vinsertf128 $0x0, 80( % rsi), % ymm1, % ymm1 # qhasm: a5[1] = mem128[ input_1 + 176 ] # asm 1: vinsertf128 $0x1,176(<input_1=int64#2),<a5=reg256#2,<a5=reg256#2 # asm 2: vinsertf128 $0x1,176(<input_1=%rsi),<a5=%ymm1,<a5=%ymm1 vinsertf128 $0x1, 176( % rsi), % ymm1, % ymm1 # qhasm: r16 = b11 & a5 # asm 1: vpand <b11=reg256#1,<a5=reg256#2,>r16=reg256#3 # asm 2: vpand <b11=%ymm0,<a5=%ymm1,>r16=%ymm2 vpand % ymm0, % ymm1, % ymm2 # qhasm: mem256[ ptr + 512 ] = r16 # asm 1: vmovupd <r16=reg256#3,512(<ptr=int64#4) # asm 2: vmovupd <r16=%ymm2,512(<ptr=%rcx) vmovupd % ymm2, 512( % rcx) # qhasm: a4[0] = mem128[ input_1 + 64 ] # asm 1: vinsertf128 $0x0,64(<input_1=int64#2),<a4=reg256#3,<a4=reg256#3 # asm 2: vinsertf128 $0x0,64(<input_1=%rsi),<a4=%ymm2,<a4=%ymm2 vinsertf128 $0x0, 64( % rsi), % ymm2, % ymm2 # qhasm: a4[1] = mem128[ input_1 + 160 ] # asm 1: vinsertf128 $0x1,160(<input_1=int64#2),<a4=reg256#3,<a4=reg256#3 # asm 2: vinsertf128 $0x1,160(<input_1=%rsi),<a4=%ymm2,<a4=%ymm2 vinsertf128 $0x1, 160( % rsi), % ymm2, % ymm2 # qhasm: r15 = 
b11 & a4 # asm 1: vpand <b11=reg256#1,<a4=reg256#3,>r15=reg256#4 # asm 2: vpand <b11=%ymm0,<a4=%ymm2,>r15=%ymm3 vpand % ymm0, % ymm2, % ymm3 # qhasm: a3[0] = mem128[ input_1 + 48 ] # asm 1: vinsertf128 $0x0,48(<input_1=int64#2),<a3=reg256#5,<a3=reg256#5 # asm 2: vinsertf128 $0x0,48(<input_1=%rsi),<a3=%ymm4,<a3=%ymm4 vinsertf128 $0x0, 48( % rsi), % ymm4, % ymm4 # qhasm: a3[1] = mem128[ input_1 + 144 ] # asm 1: vinsertf128 $0x1,144(<input_1=int64#2),<a3=reg256#5,<a3=reg256#5 # asm 2: vinsertf128 $0x1,144(<input_1=%rsi),<a3=%ymm4,<a3=%ymm4 vinsertf128 $0x1, 144( % rsi), % ymm4, % ymm4 # qhasm: r14 = b11 & a3 # asm 1: vpand <b11=reg256#1,<a3=reg256#5,>r14=reg256#6 # asm 2: vpand <b11=%ymm0,<a3=%ymm4,>r14=%ymm5 vpand % ymm0, % ymm4, % ymm5 # qhasm: a2[0] = mem128[ input_1 + 32 ] # asm 1: vinsertf128 $0x0,32(<input_1=int64#2),<a2=reg256#7,<a2=reg256#7 # asm 2: vinsertf128 $0x0,32(<input_1=%rsi),<a2=%ymm6,<a2=%ymm6 vinsertf128 $0x0, 32( % rsi), % ymm6, % ymm6 # qhasm: a2[1] = mem128[ input_1 + 128 ] # asm 1: vinsertf128 $0x1,128(<input_1=int64#2),<a2=reg256#7,<a2=reg256#7 # asm 2: vinsertf128 $0x1,128(<input_1=%rsi),<a2=%ymm6,<a2=%ymm6 vinsertf128 $0x1, 128( % rsi), % ymm6, % ymm6 # qhasm: r13 = b11 & a2 # asm 1: vpand <b11=reg256#1,<a2=reg256#7,>r13=reg256#8 # asm 2: vpand <b11=%ymm0,<a2=%ymm6,>r13=%ymm7 vpand % ymm0, % ymm6, % ymm7 # qhasm: a1[0] = mem128[ input_1 + 16 ] # asm 1: vinsertf128 $0x0,16(<input_1=int64#2),<a1=reg256#9,<a1=reg256#9 # asm 2: vinsertf128 $0x0,16(<input_1=%rsi),<a1=%ymm8,<a1=%ymm8 vinsertf128 $0x0, 16( % rsi), % ymm8, % ymm8 # qhasm: a1[1] = mem128[ input_1 + 112 ] # asm 1: vinsertf128 $0x1,112(<input_1=int64#2),<a1=reg256#9,<a1=reg256#9 # asm 2: vinsertf128 $0x1,112(<input_1=%rsi),<a1=%ymm8,<a1=%ymm8 vinsertf128 $0x1, 112( % rsi), % ymm8, % ymm8 # qhasm: r12 = b11 & a1 # asm 1: vpand <b11=reg256#1,<a1=reg256#9,>r12=reg256#10 # asm 2: vpand <b11=%ymm0,<a1=%ymm8,>r12=%ymm9 vpand % ymm0, % ymm8, % ymm9 # qhasm: a0[0] = mem128[ input_1 + 0 ] # asm 1: vinsertf128 $0x0,0(<input_1=int64#2),<a0=reg256#11,<a0=reg256#11 # asm 2: vinsertf128 $0x0,0(<input_1=%rsi),<a0=%ymm10,<a0=%ymm10 vinsertf128 $0x0, 0( % rsi), % ymm10, % ymm10 # qhasm: a0[1] = mem128[ input_1 + 96 ] # asm 1: vinsertf128 $0x1,96(<input_1=int64#2),<a0=reg256#11,<a0=reg256#11 # asm 2: vinsertf128 $0x1,96(<input_1=%rsi),<a0=%ymm10,<a0=%ymm10 vinsertf128 $0x1, 96( % rsi), % ymm10, % ymm10 # qhasm: r11 = b11 & a0 # asm 1: vpand <b11=reg256#1,<a0=reg256#11,>r11=reg256#1 # asm 2: vpand <b11=%ymm0,<a0=%ymm10,>r11=%ymm0 vpand % ymm0, % ymm10, % ymm0 # qhasm: b10 = mem128[ input_2 + 160 ] x2 # asm 1: vbroadcasti128 160(<input_2=int64#3), >b10=reg256#12 # asm 2: vbroadcasti128 160(<input_2=%rdx), >b10=%ymm11 vbroadcasti128 160( % rdx), % ymm11 # qhasm: r = b10 & a5 # asm 1: vpand <b10=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b10=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#13,<r15=reg256#4,<r15=reg256#4 # asm 2: vpxor <r=%ymm12,<r15=%ymm3,<r15=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: mem256[ ptr + 480 ] = r15 # asm 1: vmovupd <r15=reg256#4,480(<ptr=int64#4) # asm 2: vmovupd <r15=%ymm3,480(<ptr=%rcx) vmovupd % ymm3, 480( % rcx) # qhasm: r = b10 & a4 # asm 1: vpand <b10=reg256#12,<a4=reg256#3,>r=reg256#4 # asm 2: vpand <b10=%ymm11,<a4=%ymm2,>r=%ymm3 vpand % ymm11, % ymm2, % ymm3 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#4,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm3,<r14=%ymm5,<r14=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b10 & a3 # asm 1: 
vpand <b10=reg256#12,<a3=reg256#5,>r=reg256#4 # asm 2: vpand <b10=%ymm11,<a3=%ymm4,>r=%ymm3 vpand % ymm11, % ymm4, % ymm3 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#4,<r13=reg256#8,<r13=reg256#8 # asm 2: vpxor <r=%ymm3,<r13=%ymm7,<r13=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b10 & a2 # asm 1: vpand <b10=reg256#12,<a2=reg256#7,>r=reg256#4 # asm 2: vpand <b10=%ymm11,<a2=%ymm6,>r=%ymm3 vpand % ymm11, % ymm6, % ymm3 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#4,<r12=reg256#10,<r12=reg256#10 # asm 2: vpxor <r=%ymm3,<r12=%ymm9,<r12=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b10 & a1 # asm 1: vpand <b10=reg256#12,<a1=reg256#9,>r=reg256#4 # asm 2: vpand <b10=%ymm11,<a1=%ymm8,>r=%ymm3 vpand % ymm11, % ymm8, % ymm3 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#4,<r11=reg256#1,<r11=reg256#1 # asm 2: vpxor <r=%ymm3,<r11=%ymm0,<r11=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r10 = b10 & a0 # asm 1: vpand <b10=reg256#12,<a0=reg256#11,>r10=reg256#4 # asm 2: vpand <b10=%ymm11,<a0=%ymm10,>r10=%ymm3 vpand % ymm11, % ymm10, % ymm3 # qhasm: b9 = mem128[ input_2 + 144 ] x2 # asm 1: vbroadcasti128 144(<input_2=int64#3), >b9=reg256#12 # asm 2: vbroadcasti128 144(<input_2=%rdx), >b9=%ymm11 vbroadcasti128 144( % rdx), % ymm11 # qhasm: r = b9 & a5 # asm 1: vpand <b9=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b9=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#13,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm12,<r14=%ymm5,<r14=%ymm5 vpxor % ymm12, % ymm5, % ymm5 # qhasm: mem256[ ptr + 448 ] = r14 # asm 1: vmovupd <r14=reg256#6,448(<ptr=int64#4) # asm 2: vmovupd <r14=%ymm5,448(<ptr=%rcx) vmovupd % ymm5, 448( % rcx) # qhasm: r = b9 & a4 # asm 1: vpand <b9=reg256#12,<a4=reg256#3,>r=reg256#6 # asm 2: vpand <b9=%ymm11,<a4=%ymm2,>r=%ymm5 vpand % ymm11, % ymm2, % ymm5 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#6,<r13=reg256#8,<r13=reg256#8 # asm 2: vpxor <r=%ymm5,<r13=%ymm7,<r13=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b9 & a3 # asm 1: vpand <b9=reg256#12,<a3=reg256#5,>r=reg256#6 # asm 2: vpand <b9=%ymm11,<a3=%ymm4,>r=%ymm5 vpand % ymm11, % ymm4, % ymm5 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#6,<r12=reg256#10,<r12=reg256#10 # asm 2: vpxor <r=%ymm5,<r12=%ymm9,<r12=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b9 & a2 # asm 1: vpand <b9=reg256#12,<a2=reg256#7,>r=reg256#6 # asm 2: vpand <b9=%ymm11,<a2=%ymm6,>r=%ymm5 vpand % ymm11, % ymm6, % ymm5 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#6,<r11=reg256#1,<r11=reg256#1 # asm 2: vpxor <r=%ymm5,<r11=%ymm0,<r11=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b9 & a1 # asm 1: vpand <b9=reg256#12,<a1=reg256#9,>r=reg256#6 # asm 2: vpand <b9=%ymm11,<a1=%ymm8,>r=%ymm5 vpand % ymm11, % ymm8, % ymm5 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#6,<r10=reg256#4,<r10=reg256#4 # asm 2: vpxor <r=%ymm5,<r10=%ymm3,<r10=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r9 = b9 & a0 # asm 1: vpand <b9=reg256#12,<a0=reg256#11,>r9=reg256#6 # asm 2: vpand <b9=%ymm11,<a0=%ymm10,>r9=%ymm5 vpand % ymm11, % ymm10, % ymm5 # qhasm: b8 = mem128[ input_2 + 128 ] x2 # asm 1: vbroadcasti128 128(<input_2=int64#3), >b8=reg256#12 # asm 2: vbroadcasti128 128(<input_2=%rdx), >b8=%ymm11 vbroadcasti128 128( % rdx), % ymm11 # qhasm: r = b8 & a5 # asm 1: vpand <b8=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b8=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#13,<r13=reg256#8,<r13=reg256#8 # asm 2: vpxor <r=%ymm12,<r13=%ymm7,<r13=%ymm7 vpxor % ymm12, % ymm7, % ymm7 # qhasm: mem256[ 
ptr + 416 ] = r13 # asm 1: vmovupd <r13=reg256#8,416(<ptr=int64#4) # asm 2: vmovupd <r13=%ymm7,416(<ptr=%rcx) vmovupd % ymm7, 416( % rcx) # qhasm: r = b8 & a4 # asm 1: vpand <b8=reg256#12,<a4=reg256#3,>r=reg256#8 # asm 2: vpand <b8=%ymm11,<a4=%ymm2,>r=%ymm7 vpand % ymm11, % ymm2, % ymm7 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#8,<r12=reg256#10,<r12=reg256#10 # asm 2: vpxor <r=%ymm7,<r12=%ymm9,<r12=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b8 & a3 # asm 1: vpand <b8=reg256#12,<a3=reg256#5,>r=reg256#8 # asm 2: vpand <b8=%ymm11,<a3=%ymm4,>r=%ymm7 vpand % ymm11, % ymm4, % ymm7 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#8,<r11=reg256#1,<r11=reg256#1 # asm 2: vpxor <r=%ymm7,<r11=%ymm0,<r11=%ymm0 vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b8 & a2 # asm 1: vpand <b8=reg256#12,<a2=reg256#7,>r=reg256#8 # asm 2: vpand <b8=%ymm11,<a2=%ymm6,>r=%ymm7 vpand % ymm11, % ymm6, % ymm7 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#8,<r10=reg256#4,<r10=reg256#4 # asm 2: vpxor <r=%ymm7,<r10=%ymm3,<r10=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b8 & a1 # asm 1: vpand <b8=reg256#12,<a1=reg256#9,>r=reg256#8 # asm 2: vpand <b8=%ymm11,<a1=%ymm8,>r=%ymm7 vpand % ymm11, % ymm8, % ymm7 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#8,<r9=reg256#6,<r9=reg256#6 # asm 2: vpxor <r=%ymm7,<r9=%ymm5,<r9=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r8 = b8 & a0 # asm 1: vpand <b8=reg256#12,<a0=reg256#11,>r8=reg256#8 # asm 2: vpand <b8=%ymm11,<a0=%ymm10,>r8=%ymm7 vpand % ymm11, % ymm10, % ymm7 # qhasm: b7 = mem128[ input_2 + 112 ] x2 # asm 1: vbroadcasti128 112(<input_2=int64#3), >b7=reg256#12 # asm 2: vbroadcasti128 112(<input_2=%rdx), >b7=%ymm11 vbroadcasti128 112( % rdx), % ymm11 # qhasm: r = b7 & a5 # asm 1: vpand <b7=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b7=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#13,<r12=reg256#10,<r12=reg256#10 # asm 2: vpxor <r=%ymm12,<r12=%ymm9,<r12=%ymm9 vpxor % ymm12, % ymm9, % ymm9 # qhasm: mem256[ ptr + 384 ] = r12 # asm 1: vmovupd <r12=reg256#10,384(<ptr=int64#4) # asm 2: vmovupd <r12=%ymm9,384(<ptr=%rcx) vmovupd % ymm9, 384( % rcx) # qhasm: r = b7 & a4 # asm 1: vpand <b7=reg256#12,<a4=reg256#3,>r=reg256#10 # asm 2: vpand <b7=%ymm11,<a4=%ymm2,>r=%ymm9 vpand % ymm11, % ymm2, % ymm9 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#10,<r11=reg256#1,<r11=reg256#1 # asm 2: vpxor <r=%ymm9,<r11=%ymm0,<r11=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b7 & a3 # asm 1: vpand <b7=reg256#12,<a3=reg256#5,>r=reg256#10 # asm 2: vpand <b7=%ymm11,<a3=%ymm4,>r=%ymm9 vpand % ymm11, % ymm4, % ymm9 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#10,<r10=reg256#4,<r10=reg256#4 # asm 2: vpxor <r=%ymm9,<r10=%ymm3,<r10=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b7 & a2 # asm 1: vpand <b7=reg256#12,<a2=reg256#7,>r=reg256#10 # asm 2: vpand <b7=%ymm11,<a2=%ymm6,>r=%ymm9 vpand % ymm11, % ymm6, % ymm9 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#10,<r9=reg256#6,<r9=reg256#6 # asm 2: vpxor <r=%ymm9,<r9=%ymm5,<r9=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b7 & a1 # asm 1: vpand <b7=reg256#12,<a1=reg256#9,>r=reg256#10 # asm 2: vpand <b7=%ymm11,<a1=%ymm8,>r=%ymm9 vpand % ymm11, % ymm8, % ymm9 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#10,<r8=reg256#8,<r8=reg256#8 # asm 2: vpxor <r=%ymm9,<r8=%ymm7,<r8=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r7 = b7 & a0 # asm 1: vpand <b7=reg256#12,<a0=reg256#11,>r7=reg256#10 # asm 2: vpand <b7=%ymm11,<a0=%ymm10,>r7=%ymm9 vpand % ymm11, % ymm10, % ymm9 # qhasm: b6 = mem128[ input_2 + 96 ] x2 # asm 1: vbroadcasti128 
96(<input_2=int64#3), >b6=reg256#12 # asm 2: vbroadcasti128 96(<input_2=%rdx), >b6=%ymm11 vbroadcasti128 96( % rdx), % ymm11 # qhasm: r = b6 & a5 # asm 1: vpand <b6=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b6=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#13,<r11=reg256#1,<r11=reg256#1 # asm 2: vpxor <r=%ymm12,<r11=%ymm0,<r11=%ymm0 vpxor % ymm12, % ymm0, % ymm0 # qhasm: mem256[ ptr + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<ptr=int64#4) # asm 2: vmovupd <r11=%ymm0,352(<ptr=%rcx) vmovupd % ymm0, 352( % rcx) # qhasm: r = b6 & a4 # asm 1: vpand <b6=reg256#12,<a4=reg256#3,>r=reg256#1 # asm 2: vpand <b6=%ymm11,<a4=%ymm2,>r=%ymm0 vpand % ymm11, % ymm2, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#4,<r10=reg256#4 # asm 2: vpxor <r=%ymm0,<r10=%ymm3,<r10=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b6 & a3 # asm 1: vpand <b6=reg256#12,<a3=reg256#5,>r=reg256#1 # asm 2: vpand <b6=%ymm11,<a3=%ymm4,>r=%ymm0 vpand % ymm11, % ymm4, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#6,<r9=reg256#6 # asm 2: vpxor <r=%ymm0,<r9=%ymm5,<r9=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b6 & a2 # asm 1: vpand <b6=reg256#12,<a2=reg256#7,>r=reg256#1 # asm 2: vpand <b6=%ymm11,<a2=%ymm6,>r=%ymm0 vpand % ymm11, % ymm6, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#8,<r8=reg256#8 # asm 2: vpxor <r=%ymm0,<r8=%ymm7,<r8=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b6 & a1 # asm 1: vpand <b6=reg256#12,<a1=reg256#9,>r=reg256#1 # asm 2: vpand <b6=%ymm11,<a1=%ymm8,>r=%ymm0 vpand % ymm11, % ymm8, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#10,<r7=reg256#10 # asm 2: vpxor <r=%ymm0,<r7=%ymm9,<r7=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r6 = b6 & a0 # asm 1: vpand <b6=reg256#12,<a0=reg256#11,>r6=reg256#1 # asm 2: vpand <b6=%ymm11,<a0=%ymm10,>r6=%ymm0 vpand % ymm11, % ymm10, % ymm0 # qhasm: b5 = mem128[ input_2 + 80 ] x2 # asm 1: vbroadcasti128 80(<input_2=int64#3), >b5=reg256#12 # asm 2: vbroadcasti128 80(<input_2=%rdx), >b5=%ymm11 vbroadcasti128 80( % rdx), % ymm11 # qhasm: r = b5 & a5 # asm 1: vpand <b5=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b5=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#13,<r10=reg256#4,<r10=reg256#4 # asm 2: vpxor <r=%ymm12,<r10=%ymm3,<r10=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#4,320(<ptr=int64#4) # asm 2: vmovupd <r10=%ymm3,320(<ptr=%rcx) vmovupd % ymm3, 320( % rcx) # qhasm: r = b5 & a4 # asm 1: vpand <b5=reg256#12,<a4=reg256#3,>r=reg256#4 # asm 2: vpand <b5=%ymm11,<a4=%ymm2,>r=%ymm3 vpand % ymm11, % ymm2, % ymm3 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#4,<r9=reg256#6,<r9=reg256#6 # asm 2: vpxor <r=%ymm3,<r9=%ymm5,<r9=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b5 & a3 # asm 1: vpand <b5=reg256#12,<a3=reg256#5,>r=reg256#4 # asm 2: vpand <b5=%ymm11,<a3=%ymm4,>r=%ymm3 vpand % ymm11, % ymm4, % ymm3 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#4,<r8=reg256#8,<r8=reg256#8 # asm 2: vpxor <r=%ymm3,<r8=%ymm7,<r8=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b5 & a2 # asm 1: vpand <b5=reg256#12,<a2=reg256#7,>r=reg256#4 # asm 2: vpand <b5=%ymm11,<a2=%ymm6,>r=%ymm3 vpand % ymm11, % ymm6, % ymm3 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#4,<r7=reg256#10,<r7=reg256#10 # asm 2: vpxor <r=%ymm3,<r7=%ymm9,<r7=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b5 & a1 # asm 1: vpand <b5=reg256#12,<a1=reg256#9,>r=reg256#4 # asm 2: vpand 
<b5=%ymm11,<a1=%ymm8,>r=%ymm3 vpand % ymm11, % ymm8, % ymm3 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#4,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm3,<r6=%ymm0,<r6=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r5 = b5 & a0 # asm 1: vpand <b5=reg256#12,<a0=reg256#11,>r5=reg256#4 # asm 2: vpand <b5=%ymm11,<a0=%ymm10,>r5=%ymm3 vpand % ymm11, % ymm10, % ymm3 # qhasm: b4 = mem128[ input_2 + 64 ] x2 # asm 1: vbroadcasti128 64(<input_2=int64#3), >b4=reg256#12 # asm 2: vbroadcasti128 64(<input_2=%rdx), >b4=%ymm11 vbroadcasti128 64( % rdx), % ymm11 # qhasm: r = b4 & a5 # asm 1: vpand <b4=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b4=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#13,<r9=reg256#6,<r9=reg256#6 # asm 2: vpxor <r=%ymm12,<r9=%ymm5,<r9=%ymm5 vpxor % ymm12, % ymm5, % ymm5 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#6,288(<ptr=int64#4) # asm 2: vmovupd <r9=%ymm5,288(<ptr=%rcx) vmovupd % ymm5, 288( % rcx) # qhasm: r = b4 & a4 # asm 1: vpand <b4=reg256#12,<a4=reg256#3,>r=reg256#6 # asm 2: vpand <b4=%ymm11,<a4=%ymm2,>r=%ymm5 vpand % ymm11, % ymm2, % ymm5 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#6,<r8=reg256#8,<r8=reg256#8 # asm 2: vpxor <r=%ymm5,<r8=%ymm7,<r8=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b4 & a3 # asm 1: vpand <b4=reg256#12,<a3=reg256#5,>r=reg256#6 # asm 2: vpand <b4=%ymm11,<a3=%ymm4,>r=%ymm5 vpand % ymm11, % ymm4, % ymm5 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#6,<r7=reg256#10,<r7=reg256#10 # asm 2: vpxor <r=%ymm5,<r7=%ymm9,<r7=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b4 & a2 # asm 1: vpand <b4=reg256#12,<a2=reg256#7,>r=reg256#6 # asm 2: vpand <b4=%ymm11,<a2=%ymm6,>r=%ymm5 vpand % ymm11, % ymm6, % ymm5 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#6,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm5,<r6=%ymm0,<r6=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b4 & a1 # asm 1: vpand <b4=reg256#12,<a1=reg256#9,>r=reg256#6 # asm 2: vpand <b4=%ymm11,<a1=%ymm8,>r=%ymm5 vpand % ymm11, % ymm8, % ymm5 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#6,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm5,<r5=%ymm3,<r5=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r4 = b4 & a0 # asm 1: vpand <b4=reg256#12,<a0=reg256#11,>r4=reg256#6 # asm 2: vpand <b4=%ymm11,<a0=%ymm10,>r4=%ymm5 vpand % ymm11, % ymm10, % ymm5 # qhasm: b3 = mem128[ input_2 + 48 ] x2 # asm 1: vbroadcasti128 48(<input_2=int64#3), >b3=reg256#12 # asm 2: vbroadcasti128 48(<input_2=%rdx), >b3=%ymm11 vbroadcasti128 48( % rdx), % ymm11 # qhasm: r = b3 & a5 # asm 1: vpand <b3=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b3=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#13,<r8=reg256#8,<r8=reg256#8 # asm 2: vpxor <r=%ymm12,<r8=%ymm7,<r8=%ymm7 vpxor % ymm12, % ymm7, % ymm7 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#8,256(<ptr=int64#4) # asm 2: vmovupd <r8=%ymm7,256(<ptr=%rcx) vmovupd % ymm7, 256( % rcx) # qhasm: r = b3 & a4 # asm 1: vpand <b3=reg256#12,<a4=reg256#3,>r=reg256#8 # asm 2: vpand <b3=%ymm11,<a4=%ymm2,>r=%ymm7 vpand % ymm11, % ymm2, % ymm7 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#8,<r7=reg256#10,<r7=reg256#10 # asm 2: vpxor <r=%ymm7,<r7=%ymm9,<r7=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b3 & a3 # asm 1: vpand <b3=reg256#12,<a3=reg256#5,>r=reg256#8 # asm 2: vpand <b3=%ymm11,<a3=%ymm4,>r=%ymm7 vpand % ymm11, % ymm4, % ymm7 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#8,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm7,<r6=%ymm0,<r6=%ymm0 vpxor % ymm7, % ymm0, % 
ymm0 # qhasm: r = b3 & a2 # asm 1: vpand <b3=reg256#12,<a2=reg256#7,>r=reg256#8 # asm 2: vpand <b3=%ymm11,<a2=%ymm6,>r=%ymm7 vpand % ymm11, % ymm6, % ymm7 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#8,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm7,<r5=%ymm3,<r5=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b3 & a1 # asm 1: vpand <b3=reg256#12,<a1=reg256#9,>r=reg256#8 # asm 2: vpand <b3=%ymm11,<a1=%ymm8,>r=%ymm7 vpand % ymm11, % ymm8, % ymm7 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#8,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm7,<r4=%ymm5,<r4=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r3 = b3 & a0 # asm 1: vpand <b3=reg256#12,<a0=reg256#11,>r3=reg256#8 # asm 2: vpand <b3=%ymm11,<a0=%ymm10,>r3=%ymm7 vpand % ymm11, % ymm10, % ymm7 # qhasm: b2 = mem128[ input_2 + 32 ] x2 # asm 1: vbroadcasti128 32(<input_2=int64#3), >b2=reg256#12 # asm 2: vbroadcasti128 32(<input_2=%rdx), >b2=%ymm11 vbroadcasti128 32( % rdx), % ymm11 # qhasm: r = b2 & a5 # asm 1: vpand <b2=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b2=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#13,<r7=reg256#10,<r7=reg256#10 # asm 2: vpxor <r=%ymm12,<r7=%ymm9,<r7=%ymm9 vpxor % ymm12, % ymm9, % ymm9 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#10,224(<ptr=int64#4) # asm 2: vmovupd <r7=%ymm9,224(<ptr=%rcx) vmovupd % ymm9, 224( % rcx) # qhasm: r = b2 & a4 # asm 1: vpand <b2=reg256#12,<a4=reg256#3,>r=reg256#10 # asm 2: vpand <b2=%ymm11,<a4=%ymm2,>r=%ymm9 vpand % ymm11, % ymm2, % ymm9 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#10,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm9,<r6=%ymm0,<r6=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b2 & a3 # asm 1: vpand <b2=reg256#12,<a3=reg256#5,>r=reg256#10 # asm 2: vpand <b2=%ymm11,<a3=%ymm4,>r=%ymm9 vpand % ymm11, % ymm4, % ymm9 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#10,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm9,<r5=%ymm3,<r5=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b2 & a2 # asm 1: vpand <b2=reg256#12,<a2=reg256#7,>r=reg256#10 # asm 2: vpand <b2=%ymm11,<a2=%ymm6,>r=%ymm9 vpand % ymm11, % ymm6, % ymm9 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#10,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm9,<r4=%ymm5,<r4=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b2 & a1 # asm 1: vpand <b2=reg256#12,<a1=reg256#9,>r=reg256#10 # asm 2: vpand <b2=%ymm11,<a1=%ymm8,>r=%ymm9 vpand % ymm11, % ymm8, % ymm9 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#10,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm9,<r3=%ymm7,<r3=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r2 = b2 & a0 # asm 1: vpand <b2=reg256#12,<a0=reg256#11,>r2=reg256#10 # asm 2: vpand <b2=%ymm11,<a0=%ymm10,>r2=%ymm9 vpand % ymm11, % ymm10, % ymm9 # qhasm: b1 = mem128[ input_2 + 16 ] x2 # asm 1: vbroadcasti128 16(<input_2=int64#3), >b1=reg256#12 # asm 2: vbroadcasti128 16(<input_2=%rdx), >b1=%ymm11 vbroadcasti128 16( % rdx), % ymm11 # qhasm: r = b1 & a5 # asm 1: vpand <b1=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b1=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#13,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm12,<r6=%ymm0,<r6=%ymm0 vpxor % ymm12, % ymm0, % ymm0 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<ptr=int64#4) # asm 2: vmovupd <r6=%ymm0,192(<ptr=%rcx) vmovupd % ymm0, 192( % rcx) # qhasm: r = b1 & a4 # asm 1: vpand <b1=reg256#12,<a4=reg256#3,>r=reg256#1 # asm 2: vpand <b1=%ymm11,<a4=%ymm2,>r=%ymm0 vpand % ymm11, % ymm2, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor 
<r=reg256#1,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm0,<r5=%ymm3,<r5=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b1 & a3 # asm 1: vpand <b1=reg256#12,<a3=reg256#5,>r=reg256#1 # asm 2: vpand <b1=%ymm11,<a3=%ymm4,>r=%ymm0 vpand % ymm11, % ymm4, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm0,<r4=%ymm5,<r4=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b1 & a2 # asm 1: vpand <b1=reg256#12,<a2=reg256#7,>r=reg256#1 # asm 2: vpand <b1=%ymm11,<a2=%ymm6,>r=%ymm0 vpand % ymm11, % ymm6, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm0,<r3=%ymm7,<r3=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b1 & a1 # asm 1: vpand <b1=reg256#12,<a1=reg256#9,>r=reg256#1 # asm 2: vpand <b1=%ymm11,<a1=%ymm8,>r=%ymm0 vpand % ymm11, % ymm8, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm0,<r2=%ymm9,<r2=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r1 = b1 & a0 # asm 1: vpand <b1=reg256#12,<a0=reg256#11,>r1=reg256#1 # asm 2: vpand <b1=%ymm11,<a0=%ymm10,>r1=%ymm0 vpand % ymm11, % ymm10, % ymm0 # qhasm: b0 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b0=reg256#12 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b0=%ymm11 vbroadcasti128 0( % rdx), % ymm11 # qhasm: r = b0 & a5 # asm 1: vpand <b0=reg256#12,<a5=reg256#2,>r=reg256#2 # asm 2: vpand <b0=%ymm11,<a5=%ymm1,>r=%ymm1 vpand % ymm11, % ymm1, % ymm1 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#2,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm1,<r5=%ymm3,<r5=%ymm3 vpxor % ymm1, % ymm3, % ymm3 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#4) # asm 2: vmovupd <r5=%ymm3,160(<ptr=%rcx) vmovupd % ymm3, 160( % rcx) # qhasm: r = b0 & a4 # asm 1: vpand <b0=reg256#12,<a4=reg256#3,>r=reg256#2 # asm 2: vpand <b0=%ymm11,<a4=%ymm2,>r=%ymm1 vpand % ymm11, % ymm2, % ymm1 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#2,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm1,<r4=%ymm5,<r4=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r = b0 & a3 # asm 1: vpand <b0=reg256#12,<a3=reg256#5,>r=reg256#2 # asm 2: vpand <b0=%ymm11,<a3=%ymm4,>r=%ymm1 vpand % ymm11, % ymm4, % ymm1 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#2,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm1,<r3=%ymm7,<r3=%ymm7 vpxor % ymm1, % ymm7, % ymm7 # qhasm: r = b0 & a2 # asm 1: vpand <b0=reg256#12,<a2=reg256#7,>r=reg256#2 # asm 2: vpand <b0=%ymm11,<a2=%ymm6,>r=%ymm1 vpand % ymm11, % ymm6, % ymm1 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#2,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm1,<r2=%ymm9,<r2=%ymm9 vpxor % ymm1, % ymm9, % ymm9 # qhasm: r = b0 & a1 # asm 1: vpand <b0=reg256#12,<a1=reg256#9,>r=reg256#2 # asm 2: vpand <b0=%ymm11,<a1=%ymm8,>r=%ymm1 vpand % ymm11, % ymm8, % ymm1 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#2,<r1=reg256#1,<r1=reg256#1 # asm 2: vpxor <r=%ymm1,<r1=%ymm0,<r1=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: r0 = b0 & a0 # asm 1: vpand <b0=reg256#12,<a0=reg256#11,>r0=reg256#2 # asm 2: vpand <b0=%ymm11,<a0=%ymm10,>r0=%ymm1 vpand % ymm11, % ymm10, % ymm1 # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#6,128(<ptr=int64#4) # asm 2: vmovupd <r4=%ymm5,128(<ptr=%rcx) vmovupd % ymm5, 128( % rcx) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#8,96(<ptr=int64#4) # asm 2: vmovupd <r3=%ymm7,96(<ptr=%rcx) vmovupd % ymm7, 96( % rcx) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#10,64(<ptr=int64#4) # asm 2: vmovupd <r2=%ymm9,64(<ptr=%rcx) vmovupd % ymm9, 64( % 
rcx) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<ptr=int64#4) # asm 2: vmovupd <r1=%ymm0,32(<ptr=%rcx) vmovupd % ymm0, 32( % rcx) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#2,0(<ptr=int64#4) # asm 2: vmovupd <r0=%ymm1,0(<ptr=%rcx) vmovupd % ymm1, 0( % rcx) # qhasm: vzeroupper vzeroupper # qhasm: h22 = mem128[ ptr + 528 ] # asm 1: movdqu 528(<ptr=int64#4),>h22=reg128#1 # asm 2: movdqu 528(<ptr=%rcx),>h22=%xmm0 movdqu 528( % rcx), % xmm0 # qhasm: h13 = h22 # asm 1: movdqa <h22=reg128#1,>h13=reg128#2 # asm 2: movdqa <h22=%xmm0,>h13=%xmm1 movdqa % xmm0, % xmm1 # qhasm: h10 = h22 # asm 1: movdqa <h22=reg128#1,>h10=reg128#1 # asm 2: movdqa <h22=%xmm0,>h10=%xmm0 movdqa % xmm0, % xmm0 # qhasm: h21 = mem128[ ptr + 496 ] # asm 1: movdqu 496(<ptr=int64#4),>h21=reg128#3 # asm 2: movdqu 496(<ptr=%rcx),>h21=%xmm2 movdqu 496( % rcx), % xmm2 # qhasm: h12 = h21 # asm 1: movdqa <h21=reg128#3,>h12=reg128#4 # asm 2: movdqa <h21=%xmm2,>h12=%xmm3 movdqa % xmm2, % xmm3 # qhasm: h9 = h21 # asm 1: movdqa <h21=reg128#3,>h9=reg128#3 # asm 2: movdqa <h21=%xmm2,>h9=%xmm2 movdqa % xmm2, % xmm2 # qhasm: h20 = mem128[ ptr + 464 ] # asm 1: movdqu 464(<ptr=int64#4),>h20=reg128#5 # asm 2: movdqu 464(<ptr=%rcx),>h20=%xmm4 movdqu 464( % rcx), % xmm4 # qhasm: h11 = h20 # asm 1: movdqa <h20=reg128#5,>h11=reg128#6 # asm 2: movdqa <h20=%xmm4,>h11=%xmm5 movdqa % xmm4, % xmm5 # qhasm: h8 = h20 # asm 1: movdqa <h20=reg128#5,>h8=reg128#5 # asm 2: movdqa <h20=%xmm4,>h8=%xmm4 movdqa % xmm4, % xmm4 # qhasm: h19 = mem128[ ptr + 432 ] # asm 1: movdqu 432(<ptr=int64#4),>h19=reg128#7 # asm 2: movdqu 432(<ptr=%rcx),>h19=%xmm6 movdqu 432( % rcx), % xmm6 # qhasm: h10 = h10 ^ h19 # asm 1: vpxor <h19=reg128#7,<h10=reg128#1,>h10=reg128#1 # asm 2: vpxor <h19=%xmm6,<h10=%xmm0,>h10=%xmm0 vpxor % xmm6, % xmm0, % xmm0 # qhasm: h7 = h19 # asm 1: movdqa <h19=reg128#7,>h7=reg128#7 # asm 2: movdqa <h19=%xmm6,>h7=%xmm6 movdqa % xmm6, % xmm6 # qhasm: h18 = mem128[ ptr + 400 ] # asm 1: movdqu 400(<ptr=int64#4),>h18=reg128#8 # asm 2: movdqu 400(<ptr=%rcx),>h18=%xmm7 movdqu 400( % rcx), % xmm7 # qhasm: h9 = h9 ^ h18 # asm 1: vpxor <h18=reg128#8,<h9=reg128#3,>h9=reg128#3 # asm 2: vpxor <h18=%xmm7,<h9=%xmm2,>h9=%xmm2 vpxor % xmm7, % xmm2, % xmm2 # qhasm: h6 = h18 # asm 1: movdqa <h18=reg128#8,>h6=reg128#8 # asm 2: movdqa <h18=%xmm7,>h6=%xmm7 movdqa % xmm7, % xmm7 # qhasm: h17 = mem128[ ptr + 368 ] # asm 1: movdqu 368(<ptr=int64#4),>h17=reg128#9 # asm 2: movdqu 368(<ptr=%rcx),>h17=%xmm8 movdqu 368( % rcx), % xmm8 # qhasm: h8 = h8 ^ h17 # asm 1: vpxor <h17=reg128#9,<h8=reg128#5,>h8=reg128#5 # asm 2: vpxor <h17=%xmm8,<h8=%xmm4,>h8=%xmm4 vpxor % xmm8, % xmm4, % xmm4 # qhasm: h5 = h17 # asm 1: movdqa <h17=reg128#9,>h5=reg128#9 # asm 2: movdqa <h17=%xmm8,>h5=%xmm8 movdqa % xmm8, % xmm8 # qhasm: h16 = mem128[ ptr + 336 ] # asm 1: movdqu 336(<ptr=int64#4),>h16=reg128#10 # asm 2: movdqu 336(<ptr=%rcx),>h16=%xmm9 movdqu 336( % rcx), % xmm9 # qhasm: h16 = h16 ^ mem128[ ptr + 512 ] # asm 1: vpxor 512(<ptr=int64#4),<h16=reg128#10,>h16=reg128#10 # asm 2: vpxor 512(<ptr=%rcx),<h16=%xmm9,>h16=%xmm9 vpxor 512( % rcx), % xmm9, % xmm9 # qhasm: h7 = h7 ^ h16 # asm 1: vpxor <h16=reg128#10,<h7=reg128#7,>h7=reg128#7 # asm 2: vpxor <h16=%xmm9,<h7=%xmm6,>h7=%xmm6 vpxor % xmm9, % xmm6, % xmm6 # qhasm: h4 = h16 # asm 1: movdqa <h16=reg128#10,>h4=reg128#10 # asm 2: movdqa <h16=%xmm9,>h4=%xmm9 movdqa % xmm9, % xmm9 # qhasm: h15 = mem128[ ptr + 304 ] # asm 1: movdqu 304(<ptr=int64#4),>h15=reg128#11 # asm 2: movdqu 304(<ptr=%rcx),>h15=%xmm10 movdqu 304( 
% rcx), % xmm10 # qhasm: h15 = h15 ^ mem128[ ptr + 480 ] # asm 1: vpxor 480(<ptr=int64#4),<h15=reg128#11,>h15=reg128#11 # asm 2: vpxor 480(<ptr=%rcx),<h15=%xmm10,>h15=%xmm10 vpxor 480( % rcx), % xmm10, % xmm10 # qhasm: h6 = h6 ^ h15 # asm 1: vpxor <h15=reg128#11,<h6=reg128#8,>h6=reg128#8 # asm 2: vpxor <h15=%xmm10,<h6=%xmm7,>h6=%xmm7 vpxor % xmm10, % xmm7, % xmm7 # qhasm: h3 = h15 # asm 1: movdqa <h15=reg128#11,>h3=reg128#11 # asm 2: movdqa <h15=%xmm10,>h3=%xmm10 movdqa % xmm10, % xmm10 # qhasm: h14 = mem128[ ptr + 272 ] # asm 1: movdqu 272(<ptr=int64#4),>h14=reg128#12 # asm 2: movdqu 272(<ptr=%rcx),>h14=%xmm11 movdqu 272( % rcx), % xmm11 # qhasm: h14 = h14 ^ mem128[ ptr + 448 ] # asm 1: vpxor 448(<ptr=int64#4),<h14=reg128#12,>h14=reg128#12 # asm 2: vpxor 448(<ptr=%rcx),<h14=%xmm11,>h14=%xmm11 vpxor 448( % rcx), % xmm11, % xmm11 # qhasm: h5 = h5 ^ h14 # asm 1: vpxor <h14=reg128#12,<h5=reg128#9,>h5=reg128#9 # asm 2: vpxor <h14=%xmm11,<h5=%xmm8,>h5=%xmm8 vpxor % xmm11, % xmm8, % xmm8 # qhasm: h2 = h14 # asm 1: movdqa <h14=reg128#12,>h2=reg128#12 # asm 2: movdqa <h14=%xmm11,>h2=%xmm11 movdqa % xmm11, % xmm11 # qhasm: h13 = h13 ^ mem128[ ptr + 240 ] # asm 1: vpxor 240(<ptr=int64#4),<h13=reg128#2,>h13=reg128#2 # asm 2: vpxor 240(<ptr=%rcx),<h13=%xmm1,>h13=%xmm1 vpxor 240( % rcx), % xmm1, % xmm1 # qhasm: h13 = h13 ^ mem128[ ptr + 416 ] # asm 1: vpxor 416(<ptr=int64#4),<h13=reg128#2,>h13=reg128#2 # asm 2: vpxor 416(<ptr=%rcx),<h13=%xmm1,>h13=%xmm1 vpxor 416( % rcx), % xmm1, % xmm1 # qhasm: h4 = h4 ^ h13 # asm 1: vpxor <h13=reg128#2,<h4=reg128#10,>h4=reg128#10 # asm 2: vpxor <h13=%xmm1,<h4=%xmm9,>h4=%xmm9 vpxor % xmm1, % xmm9, % xmm9 # qhasm: h1 = h13 # asm 1: movdqa <h13=reg128#2,>h1=reg128#2 # asm 2: movdqa <h13=%xmm1,>h1=%xmm1 movdqa % xmm1, % xmm1 # qhasm: h12 = h12 ^ mem128[ ptr + 208 ] # asm 1: vpxor 208(<ptr=int64#4),<h12=reg128#4,>h12=reg128#4 # asm 2: vpxor 208(<ptr=%rcx),<h12=%xmm3,>h12=%xmm3 vpxor 208( % rcx), % xmm3, % xmm3 # qhasm: h12 = h12 ^ mem128[ ptr + 384 ] # asm 1: vpxor 384(<ptr=int64#4),<h12=reg128#4,>h12=reg128#4 # asm 2: vpxor 384(<ptr=%rcx),<h12=%xmm3,>h12=%xmm3 vpxor 384( % rcx), % xmm3, % xmm3 # qhasm: h3 = h3 ^ h12 # asm 1: vpxor <h12=reg128#4,<h3=reg128#11,>h3=reg128#11 # asm 2: vpxor <h12=%xmm3,<h3=%xmm10,>h3=%xmm10 vpxor % xmm3, % xmm10, % xmm10 # qhasm: h0 = h12 # asm 1: movdqa <h12=reg128#4,>h0=reg128#4 # asm 2: movdqa <h12=%xmm3,>h0=%xmm3 movdqa % xmm3, % xmm3 # qhasm: h11 = h11 ^ mem128[ ptr + 352 ] # asm 1: vpxor 352(<ptr=int64#4),<h11=reg128#6,>h11=reg128#6 # asm 2: vpxor 352(<ptr=%rcx),<h11=%xmm5,>h11=%xmm5 vpxor 352( % rcx), % xmm5, % xmm5 # qhasm: h11 = h11 ^ mem128[ ptr + 176 ] # asm 1: vpxor 176(<ptr=int64#4),<h11=reg128#6,>h11=reg128#6 # asm 2: vpxor 176(<ptr=%rcx),<h11=%xmm5,>h11=%xmm5 vpxor 176( % rcx), % xmm5, % xmm5 # qhasm: mem128[ input_0 + 176 ] = h11 # asm 1: movdqu <h11=reg128#6,176(<input_0=int64#1) # asm 2: movdqu <h11=%xmm5,176(<input_0=%rdi) movdqu % xmm5, 176( % rdi) # qhasm: h10 = h10 ^ mem128[ ptr + 320 ] # asm 1: vpxor 320(<ptr=int64#4),<h10=reg128#1,>h10=reg128#1 # asm 2: vpxor 320(<ptr=%rcx),<h10=%xmm0,>h10=%xmm0 vpxor 320( % rcx), % xmm0, % xmm0 # qhasm: h10 = h10 ^ mem128[ ptr + 144 ] # asm 1: vpxor 144(<ptr=int64#4),<h10=reg128#1,>h10=reg128#1 # asm 2: vpxor 144(<ptr=%rcx),<h10=%xmm0,>h10=%xmm0 vpxor 144( % rcx), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 160 ] = h10 # asm 1: movdqu <h10=reg128#1,160(<input_0=int64#1) # asm 2: movdqu <h10=%xmm0,160(<input_0=%rdi) movdqu % xmm0, 160( % rdi) # qhasm: h9 = h9 ^ mem128[ ptr + 288 ] # asm 
1: vpxor 288(<ptr=int64#4),<h9=reg128#3,>h9=reg128#1 # asm 2: vpxor 288(<ptr=%rcx),<h9=%xmm2,>h9=%xmm0 vpxor 288( % rcx), % xmm2, % xmm0 # qhasm: h9 = h9 ^ mem128[ ptr + 112 ] # asm 1: vpxor 112(<ptr=int64#4),<h9=reg128#1,>h9=reg128#1 # asm 2: vpxor 112(<ptr=%rcx),<h9=%xmm0,>h9=%xmm0 vpxor 112( % rcx), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 144 ] = h9 # asm 1: movdqu <h9=reg128#1,144(<input_0=int64#1) # asm 2: movdqu <h9=%xmm0,144(<input_0=%rdi) movdqu % xmm0, 144( % rdi) # qhasm: h8 = h8 ^ mem128[ ptr + 256 ] # asm 1: vpxor 256(<ptr=int64#4),<h8=reg128#5,>h8=reg128#1 # asm 2: vpxor 256(<ptr=%rcx),<h8=%xmm4,>h8=%xmm0 vpxor 256( % rcx), % xmm4, % xmm0 # qhasm: h8 = h8 ^ mem128[ ptr + 80 ] # asm 1: vpxor 80(<ptr=int64#4),<h8=reg128#1,>h8=reg128#1 # asm 2: vpxor 80(<ptr=%rcx),<h8=%xmm0,>h8=%xmm0 vpxor 80( % rcx), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 128 ] = h8 # asm 1: movdqu <h8=reg128#1,128(<input_0=int64#1) # asm 2: movdqu <h8=%xmm0,128(<input_0=%rdi) movdqu % xmm0, 128( % rdi) # qhasm: h7 = h7 ^ mem128[ ptr + 224 ] # asm 1: vpxor 224(<ptr=int64#4),<h7=reg128#7,>h7=reg128#1 # asm 2: vpxor 224(<ptr=%rcx),<h7=%xmm6,>h7=%xmm0 vpxor 224( % rcx), % xmm6, % xmm0 # qhasm: h7 = h7 ^ mem128[ ptr + 48 ] # asm 1: vpxor 48(<ptr=int64#4),<h7=reg128#1,>h7=reg128#1 # asm 2: vpxor 48(<ptr=%rcx),<h7=%xmm0,>h7=%xmm0 vpxor 48( % rcx), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 112 ] = h7 # asm 1: movdqu <h7=reg128#1,112(<input_0=int64#1) # asm 2: movdqu <h7=%xmm0,112(<input_0=%rdi) movdqu % xmm0, 112( % rdi) # qhasm: h6 = h6 ^ mem128[ ptr + 192 ] # asm 1: vpxor 192(<ptr=int64#4),<h6=reg128#8,>h6=reg128#1 # asm 2: vpxor 192(<ptr=%rcx),<h6=%xmm7,>h6=%xmm0 vpxor 192( % rcx), % xmm7, % xmm0 # qhasm: h6 = h6 ^ mem128[ ptr + 16 ] # asm 1: vpxor 16(<ptr=int64#4),<h6=reg128#1,>h6=reg128#1 # asm 2: vpxor 16(<ptr=%rcx),<h6=%xmm0,>h6=%xmm0 vpxor 16( % rcx), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 96 ] = h6 # asm 1: movdqu <h6=reg128#1,96(<input_0=int64#1) # asm 2: movdqu <h6=%xmm0,96(<input_0=%rdi) movdqu % xmm0, 96( % rdi) # qhasm: h5 = h5 ^ mem128[ ptr + 160 ] # asm 1: vpxor 160(<ptr=int64#4),<h5=reg128#9,>h5=reg128#1 # asm 2: vpxor 160(<ptr=%rcx),<h5=%xmm8,>h5=%xmm0 vpxor 160( % rcx), % xmm8, % xmm0 # qhasm: mem128[ input_0 + 80 ] = h5 # asm 1: movdqu <h5=reg128#1,80(<input_0=int64#1) # asm 2: movdqu <h5=%xmm0,80(<input_0=%rdi) movdqu % xmm0, 80( % rdi) # qhasm: h4 = h4 ^ mem128[ ptr + 128 ] # asm 1: vpxor 128(<ptr=int64#4),<h4=reg128#10,>h4=reg128#1 # asm 2: vpxor 128(<ptr=%rcx),<h4=%xmm9,>h4=%xmm0 vpxor 128( % rcx), % xmm9, % xmm0 # qhasm: mem128[ input_0 + 64 ] = h4 # asm 1: movdqu <h4=reg128#1,64(<input_0=int64#1) # asm 2: movdqu <h4=%xmm0,64(<input_0=%rdi) movdqu % xmm0, 64( % rdi) # qhasm: h3 = h3 ^ mem128[ ptr + 96 ] # asm 1: vpxor 96(<ptr=int64#4),<h3=reg128#11,>h3=reg128#1 # asm 2: vpxor 96(<ptr=%rcx),<h3=%xmm10,>h3=%xmm0 vpxor 96( % rcx), % xmm10, % xmm0 # qhasm: mem128[ input_0 + 48 ] = h3 # asm 1: movdqu <h3=reg128#1,48(<input_0=int64#1) # asm 2: movdqu <h3=%xmm0,48(<input_0=%rdi) movdqu % xmm0, 48( % rdi) # qhasm: h2 = h2 ^ mem128[ ptr + 64 ] # asm 1: vpxor 64(<ptr=int64#4),<h2=reg128#12,>h2=reg128#1 # asm 2: vpxor 64(<ptr=%rcx),<h2=%xmm11,>h2=%xmm0 vpxor 64( % rcx), % xmm11, % xmm0 # qhasm: mem128[ input_0 + 32 ] = h2 # asm 1: movdqu <h2=reg128#1,32(<input_0=int64#1) # asm 2: movdqu <h2=%xmm0,32(<input_0=%rdi) movdqu % xmm0, 32( % rdi) # qhasm: h1 = h1 ^ mem128[ ptr + 32 ] # asm 1: vpxor 32(<ptr=int64#4),<h1=reg128#2,>h1=reg128#1 # asm 2: vpxor 32(<ptr=%rcx),<h1=%xmm1,>h1=%xmm0 vpxor 32( % 
rcx), % xmm1, % xmm0 # qhasm: mem128[ input_0 + 16 ] = h1 # asm 1: movdqu <h1=reg128#1,16(<input_0=int64#1) # asm 2: movdqu <h1=%xmm0,16(<input_0=%rdi) movdqu % xmm0, 16( % rdi) # qhasm: h0 = h0 ^ mem128[ ptr + 0 ] # asm 1: vpxor 0(<ptr=int64#4),<h0=reg128#4,>h0=reg128#1 # asm 2: vpxor 0(<ptr=%rcx),<h0=%xmm3,>h0=%xmm0 vpxor 0( % rcx), % xmm3, % xmm0 # qhasm: mem128[ input_0 + 0 ] = h0 # asm 1: movdqu <h0=reg128#1,0(<input_0=int64#1) # asm 2: movdqu <h0=%xmm0,0(<input_0=%rdi) movdqu % xmm0, 0( % rdi) # qhasm: return add % r11, % rsp ret
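The vec128_mul_asm record above (left in its flattened form) computes 23 partial products r0..r22 with vpand/vpxor chains, a schoolbook product of two 12-coefficient bitsliced operands, and then folds h12..h22 back into h0..h11. The fold pattern visible in the asm (h22 into h13 and h10, ..., h12 into h3 and h0) is exactly reduction modulo x^12 + x^3 + 1, the field polynomial of GF(2^12) used by mceliece348864. A plain-C sketch of the same multiply for a single 64-bit lane, matching the shape of the portable PQClean reference (names are ours):

#include <stdint.h>

#define GFBITS 12

/* Bitsliced GF(2^12) multiply: bit k of lane i of h is coefficient k
   of the product of the field elements in lane i of f and g. */
static void vec_mul_sketch(uint64_t h[GFBITS],
                           const uint64_t f[GFBITS],
                           const uint64_t g[GFBITS])
{
    uint64_t r[2 * GFBITS - 1] = {0};
    int i, j;

    /* schoolbook product: AND is a GF(2) multiply, XOR a GF(2) add */
    for (i = 0; i < GFBITS; i++)
        for (j = 0; j < GFBITS; j++)
            r[i + j] ^= f[i] & g[j];

    /* reduce modulo x^12 + x^3 + 1: x^(12+k) = x^(3+k) + x^k,
       high coefficients first so folded values get reduced too */
    for (i = 2 * GFBITS - 2; i >= GFBITS; i--) {
        r[i - GFBITS + 3] ^= r[i];
        r[i - GFBITS]     ^= r[i];
    }

    for (i = 0; i < GFBITS; i++) h[i] = r[i];
}

The AVX2 version runs this once for 128 lanes at a time by broadcasting each 128-bit b-coefficient across a ymm register and pairing it with two a-halves, spilling partial products to the on-stack buf between the product and reduction phases.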
mktmansour/MKT-KSA-Geolocation-Security
7,454
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864/avx2/vec_reduce_asm.S
#include "namespace.h" #define vec_reduce_asm CRYPTO_NAMESPACE(vec_reduce_asm) #define _vec_reduce_asm _CRYPTO_NAMESPACE(vec_reduce_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 t # qhasm: int64 c # qhasm: int64 r # qhasm: enter vec_reduce_asm .p2align 5 .global _vec_reduce_asm .global vec_reduce_asm _vec_reduce_asm: vec_reduce_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: r = 0 # asm 1: mov $0,>r=int64#7 # asm 2: mov $0,>r=%rax mov $0, % rax # qhasm: t = mem64[ input_0 + 88 ] # asm 1: movq 88(<input_0=int64#1),>t=int64#2 # asm 2: movq 88(<input_0=%rdi),>t=%rsi movq 88( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 80 ] # asm 1: movq 80(<input_0=int64#1),>t=int64#2 # asm 2: movq 80(<input_0=%rdi),>t=%rsi movq 80( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 72 ] # asm 1: movq 72(<input_0=int64#1),>t=int64#2 # asm 2: movq 72(<input_0=%rdi),>t=%rsi movq 72( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 64 ] # asm 1: movq 64(<input_0=int64#1),>t=int64#2 # asm 2: movq 64(<input_0=%rdi),>t=%rsi movq 64( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 56 ] # asm 1: movq 56(<input_0=int64#1),>t=int64#2 # asm 2: movq 56(<input_0=%rdi),>t=%rsi movq 56( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 48 ] # asm 1: movq 48(<input_0=int64#1),>t=int64#2 # asm 2: movq 
48(<input_0=%rdi),>t=%rsi movq 48( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 40 ] # asm 1: movq 40(<input_0=int64#1),>t=int64#2 # asm 2: movq 40(<input_0=%rdi),>t=%rsi movq 40( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 32 ] # asm 1: movq 32(<input_0=int64#1),>t=int64#2 # asm 2: movq 32(<input_0=%rdi),>t=%rsi movq 32( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 24 ] # asm 1: movq 24(<input_0=int64#1),>t=int64#2 # asm 2: movq 24(<input_0=%rdi),>t=%rsi movq 24( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 16 ] # asm 1: movq 16(<input_0=int64#1),>t=int64#2 # asm 2: movq 16(<input_0=%rdi),>t=%rsi movq 16( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>t=int64#2 # asm 2: movq 8(<input_0=%rdi),>t=%rsi movq 8( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>t=int64#1 # asm 2: movq 0(<input_0=%rdi),>t=%rdi movq 0( % rdi), % rdi # qhasm: c = count(t) # asm 1: popcnt <t=int64#1, >c=int64#1 # asm 2: popcnt <t=%rdi, >c=%rdi popcnt % rdi, % rdi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#1d # asm 2: and $1,<c=%edi and $1, % edi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#1,<r=int64#7 # asm 2: or 
<c=%rdi,<r=%rax or % rdi, % rax # qhasm: return r add % r11, % rsp ret
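The vec_reduce_asm routine above is fully unrolled, but the qhasm comments make the underlying operation clear: take the parity (popcount mod 2) of each of the 12 64-bit limbs of a bitsliced GF(2^12) element and pack the parities into a single 12-bit result, with limb i's parity landing in bit i (limb 11 is processed first and accumulates the most left-shifts). A minimal portable C sketch of that computation follows; the function name, the __builtin_popcountll intrinsic (GCC/Clang), and the fixed limb count are illustrative assumptions, not the PQClean reference code.

#include <stdint.h>

/* Sketch of vec_reduce_asm: 12 parity bits packed into one word.
 * Assumes a GCC/Clang-style popcount builtin. */
static uint64_t vec_reduce_sketch(const uint64_t in[12]) {
    uint64_t r = 0;
    for (int i = 11; i >= 0; i--) {
        uint64_t c = (uint64_t)(__builtin_popcountll(in[i]) & 1); /* parity of limb i */
        r = (r << 1) | c;  /* mirrors the shl $1 / or pattern in the asm */
    }
    return r;  /* bit i = parity of in[i] */
}

The asm realizes the same loop with POPCNT on descending memory offsets (88 down to 0), which is why the parity of offset 0 ends up in the least-significant bit.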
mktmansour/MKT-KSA-Geolocation-Security
14,105
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864/avx2/syndrome_asm.S
#include "namespace.h" #define syndrome_asm CRYPTO_NAMESPACE(syndrome_asm) #define _syndrome_asm _CRYPTO_NAMESPACE(syndrome_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 b64 # qhasm: int64 synd # qhasm: int64 addr # qhasm: int64 c # qhasm: int64 c_all # qhasm: int64 row # qhasm: int64 p # qhasm: int64 e # qhasm: int64 s # qhasm: reg256 pp # qhasm: reg256 ee # qhasm: reg256 ss # qhasm: int64 buf_ptr # qhasm: stack256 buf # qhasm: enter syndrome_asm .p2align 5 .global _syndrome_asm .global syndrome_asm _syndrome_asm: syndrome_asm: mov % rsp, % r11 and $31, % r11 add $32, % r11 sub % r11, % rsp # qhasm: input_1 += 260780 # asm 1: add $260780,<input_1=int64#2 # asm 2: add $260780,<input_1=%rsi add $260780, % rsi # qhasm: buf_ptr = &buf # asm 1: leaq <buf=stack256#1,>buf_ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>buf_ptr=%rcx leaq 0( % rsp), % rcx # qhasm: row = 768 # asm 1: mov $768,>row=int64#5 # asm 2: mov $768,>row=%r8 mov $768, % r8 # qhasm: loop: ._loop: # qhasm: row -= 1 # asm 1: sub $1,<row=int64#5 # asm 2: sub $1,<row=%r8 sub $1, % r8 # qhasm: ss = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>ss=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>ss=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: ee = mem256[ input_2 + 96 ] # asm 1: vmovupd 96(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 96(<input_2=%rdx),>ee=%ymm1 vmovupd 96( % rdx), % ymm1 # qhasm: ss &= ee # asm 1: vpand <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpand <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpand % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 32(<input_1=%rsi),>pp=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 128 ] # asm 1: vmovupd 128(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 128(<input_2=%rdx),>ee=%ymm2 vmovupd 128( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 64(<input_1=%rsi),>pp=%ymm1 vmovupd 64( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 160 ] # asm 1: vmovupd 160(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 160(<input_2=%rdx),>ee=%ymm2 vmovupd 160( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 96(<input_1=%rsi),>pp=%ymm1 vmovupd 96( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 192 ] # asm 1: vmovupd 192(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 192(<input_2=%rdx),>ee=%ymm2 vmovupd 192( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % 
ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 128(<input_1=%rsi),>pp=%ymm1 vmovupd 128( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 224 ] # asm 1: vmovupd 224(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 224(<input_2=%rdx),>ee=%ymm2 vmovupd 224( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 160(<input_1=%rsi),>pp=%ymm1 vmovupd 160( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 256 ] # asm 1: vmovupd 256(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 256(<input_2=%rdx),>ee=%ymm2 vmovupd 256( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 192(<input_1=%rsi),>pp=%ymm1 vmovupd 192( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 288 ] # asm 1: vmovupd 288(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 288(<input_2=%rdx),>ee=%ymm2 vmovupd 288( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 224(<input_1=%rsi),>pp=%ymm1 vmovupd 224( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 320 ] # asm 1: vmovupd 320(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 320(<input_2=%rdx),>ee=%ymm2 vmovupd 320( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 256(<input_1=%rsi),>pp=%ymm1 vmovupd 256( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 352 ] # asm 1: vmovupd 352(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 352(<input_2=%rdx),>ee=%ymm2 vmovupd 352( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 288(<input_1=%rsi),>pp=%ymm1 vmovupd 288( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 384 ] # asm 1: vmovupd 384(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 384(<input_2=%rdx),>ee=%ymm2 
vmovupd 384( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: buf = ss # asm 1: vmovapd <ss=reg256#1,>buf=stack256#1 # asm 2: vmovapd <ss=%ymm0,>buf=0(%rsp) vmovapd % ymm0, 0( % rsp) # qhasm: s = *(uint64 *)(input_1 + 320) # asm 1: movq 320(<input_1=int64#2),>s=int64#6 # asm 2: movq 320(<input_1=%rsi),>s=%r9 movq 320( % rsi), % r9 # qhasm: e = *(uint64 *)(input_2 + 416) # asm 1: movq 416(<input_2=int64#3),>e=int64#7 # asm 2: movq 416(<input_2=%rdx),>e=%rax movq 416( % rdx), % rax # qhasm: s &= e # asm 1: and <e=int64#7,<s=int64#6 # asm 2: and <e=%rax,<s=%r9 and % rax, % r9 # qhasm: p = *(uint64 *)(input_1 + 328) # asm 1: movq 328(<input_1=int64#2),>p=int64#7 # asm 2: movq 328(<input_1=%rsi),>p=%rax movq 328( % rsi), % rax # qhasm: e = *(uint64 *)(input_2 + 424) # asm 1: movq 424(<input_2=int64#3),>e=int64#8 # asm 2: movq 424(<input_2=%rdx),>e=%r10 movq 424( % rdx), % r10 # qhasm: p &= e # asm 1: and <e=int64#8,<p=int64#7 # asm 2: and <e=%r10,<p=%rax and % r10, % rax # qhasm: s ^= p # asm 1: xor <p=int64#7,<s=int64#6 # asm 2: xor <p=%rax,<s=%r9 xor % rax, % r9 # qhasm: p = *(uint32 *)(input_1 + 336) # asm 1: movl 336(<input_1=int64#2),>p=int64#7d # asm 2: movl 336(<input_1=%rsi),>p=%eax movl 336( % rsi), % eax # qhasm: e = *(uint32 *)(input_2 + 432) # asm 1: movl 432(<input_2=int64#3),>e=int64#8d # asm 2: movl 432(<input_2=%rdx),>e=%r10d movl 432( % rdx), % r10d # qhasm: p &= e # asm 1: and <e=int64#8,<p=int64#7 # asm 2: and <e=%r10,<p=%rax and % r10, % rax # qhasm: s ^= p # asm 1: xor <p=int64#7,<s=int64#6 # asm 2: xor <p=%rax,<s=%r9 xor % rax, % r9 # qhasm: c_all = count(s) # asm 1: popcnt <s=int64#6, >c_all=int64#6 # asm 2: popcnt <s=%r9, >c_all=%r9 popcnt % r9, % r9 # qhasm: b64 = mem64[ buf_ptr + 0 ] # asm 1: movq 0(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 0(<buf_ptr=%rcx),>b64=%rax movq 0( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 8 ] # asm 1: movq 8(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 8(<buf_ptr=%rcx),>b64=%rax movq 8( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 16 ] # asm 1: movq 16(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 16(<buf_ptr=%rcx),>b64=%rax movq 16( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 24 ] # asm 1: movq 24(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 24(<buf_ptr=%rcx),>b64=%rax movq 24( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: addr = row # asm 1: mov <row=int64#5,>addr=int64#7 # asm 2: mov 
<row=%r8,>addr=%rax mov % r8, % rax # qhasm: (uint64) addr >>= 3 # asm 1: shr $3,<addr=int64#7 # asm 2: shr $3,<addr=%rax shr $3, % rax # qhasm: addr += input_0 # asm 1: add <input_0=int64#1,<addr=int64#7 # asm 2: add <input_0=%rdi,<addr=%rax add % rdi, % rax # qhasm: synd = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#7),>synd=int64#8 # asm 2: movzbq 0(<addr=%rax),>synd=%r10 movzbq 0( % rax), % r10 # qhasm: synd <<= 1 # asm 1: shl $1,<synd=int64#8 # asm 2: shl $1,<synd=%r10 shl $1, % r10 # qhasm: (uint32) c_all &= 1 # asm 1: and $1,<c_all=int64#6d # asm 2: and $1,<c_all=%r9d and $1, % r9d # qhasm: synd |= c_all # asm 1: or <c_all=int64#6,<synd=int64#8 # asm 2: or <c_all=%r9,<synd=%r10 or % r9, % r10 # qhasm: *(uint8 *) (addr + 0) = synd # asm 1: movb <synd=int64#8b,0(<addr=int64#7) # asm 2: movb <synd=%r10b,0(<addr=%rax) movb % r10b, 0( % rax) # qhasm: input_1 -= 340 # asm 1: sub $340,<input_1=int64#2 # asm 2: sub $340,<input_1=%rsi sub $340, % rsi # qhasm: =? row-0 # asm 1: cmp $0,<row=int64#5 # asm 2: cmp $0,<row=%r8 cmp $0, % r8 # comment:fp stack unchanged by jump # qhasm: goto loop if != jne ._loop # qhasm: ss = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 0(<input_0=%rdi),>ss=%ymm0 vmovupd 0( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 0(<input_2=%rdx),>ee=%ymm1 vmovupd 0( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 0 ] = ss # asm 1: vmovupd <ss=reg256#1,0(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,0(<input_0=%rdi) vmovupd % ymm0, 0( % rdi) # qhasm: ss = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 32(<input_0=%rdi),>ss=%ymm0 vmovupd 32( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 32(<input_2=%rdx),>ee=%ymm1 vmovupd 32( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 32 ] = ss # asm 1: vmovupd <ss=reg256#1,32(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,32(<input_0=%rdi) vmovupd % ymm0, 32( % rdi) # qhasm: ss = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 64(<input_0=%rdi),>ss=%ymm0 vmovupd 64( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 64(<input_2=%rdx),>ee=%ymm1 vmovupd 64( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 64 ] = ss # asm 1: vmovupd <ss=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: return add % r11, % rsp ret
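The syndrome_asm routine above encodes the systematic-form structure of the mceliece348864 public key: the parity-check matrix is [I | T], so the syndrome s = H*e over GF(2) is computed as T times the trailing 2720 error bits (one AND-then-parity per row, 768 rows of 340 bytes, walked backwards from offset 260780 = 767*340), followed by XORing in the leading 768 error bits for the identity part. A hedged C sketch of that structure follows; the names, the byte-level bit packing, and the parity helper are reconstructions from the qhasm comments, not the PQClean reference implementation.

#include <stdint.h>
#include <string.h>

#define PK_NROWS 768           /* syndrome length in bits */
#define ROW_BYTES 340          /* (3488 - 768) / 8 bytes per matrix row */

/* parity of the AND of two n-byte strings (GF(2) dot product) */
static int parity_of_and(const uint8_t *a, const uint8_t *b, int n) {
    uint8_t acc = 0;
    for (int i = 0; i < n; i++) acc ^= (uint8_t)(a[i] & b[i]);
    acc ^= acc >> 4; acc ^= acc >> 2; acc ^= acc >> 1;
    return acc & 1;
}

static void syndrome_sketch(uint8_t s[PK_NROWS / 8],
                            const uint8_t *pk,          /* PK_NROWS rows of ROW_BYTES */
                            const uint8_t e[3488 / 8]) {
    memset(s, 0, PK_NROWS / 8);
    for (int row = 0; row < PK_NROWS; row++) {
        int bit = parity_of_and(pk + (size_t)row * ROW_BYTES,
                                e + PK_NROWS / 8, ROW_BYTES);  /* T * e_tail */
        s[row / 8] |= (uint8_t)(bit << (row % 8));  /* matches the asm's shift/or packing */
    }
    for (int i = 0; i < PK_NROWS / 8; i++)
        s[i] ^= e[i];  /* identity part: s ^= e_head, the final vpxor block above */
}

In the asm the 340-byte dot product is split into ten 32-byte vpand/vpxor steps on ymm registers plus an 8+8+4-byte scalar tail, with POPCNT folding the accumulated words down to one parity bit per row.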
mktmansour/MKT-KSA-Geolocation-Security
56,484
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864/avx2/vec256_mul_asm.S
#include "namespace.h" #define vec256_mul_asm CRYPTO_NAMESPACE(vec256_mul_asm) #define _vec256_mul_asm _CRYPTO_NAMESPACE(vec256_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r # qhasm: enter vec256_mul_asm .p2align 5 .global _vec256_mul_asm .global vec256_mul_asm _vec256_mul_asm: vec256_mul_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#2 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm1 vmovupd 352( % rsi), % ymm1 # qhasm: r11 = a11 & b0 # asm 1: vpand <a11=reg256#2,<b0=reg256#1,>r11=reg256#3 # asm 2: vpand <a11=%ymm1,<b0=%ymm0,>r11=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r12 = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#2,>r12=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm1,>r12=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r13 = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#2,>r13=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm1,>r13=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r14 = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#2,>r14=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm1,>r14=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r15 = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#2,>r15=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm1,>r15=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r16 = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#2,>r16=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm1,>r16=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r17 = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#2,>r17=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm1,>r17=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r18 = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#2,>r18=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm1,>r18=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r19 = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#2,>r19=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm1,>r19=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r20 = a11 & mem256[input_2 + 288] # asm 1: vpand 
288(<input_2=int64#3),<a11=reg256#2,>r20=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm1,>r20=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r21 = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#2,>r21=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm1,>r21=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r22 = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#2,>r22=reg256#2 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm1,>r22=%ymm1 vpand 352( % rdx), % ymm1, % ymm1 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#2,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r22=%ymm1,<r13=%ymm4,<r13=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r10 = r22 # asm 1: vmovapd <r22=reg256#2,>r10=reg256#2 # asm 2: vmovapd <r22=%ymm1,>r10=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#14 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm13 vmovupd 320( % rsi), % ymm13 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a10=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 224( 
% rdx), % ymm13, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9 # asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#10,<r18=reg256#10 # asm 2: vpxor <r=%ymm14,<r18=%ymm9,<r18=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#11,<r19=reg256#11 # asm 2: vpxor <r=%ymm14,<r19=%ymm10,<r19=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#12,<r20=reg256#12 # asm 2: vpxor <r=%ymm14,<r20=%ymm11,<r20=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#14,<r21=reg256#13,<r21=reg256#13 # asm 2: vpxor <r=%ymm13,<r21=%ymm12,<r21=%ymm12 vpxor % ymm13, % ymm12, % ymm12 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#13,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r21=%ymm12,<r12=%ymm3,<r12=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r9 = r21 # asm 1: vmovapd <r21=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r21=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#14 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm13 vmovupd 288( % rsi), % ymm13 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a9=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 128( % rdx), 
% ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9 # asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#10,<r18=reg256#10 # asm 2: vpxor <r=%ymm14,<r18=%ymm9,<r18=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#11,<r19=reg256#11 # asm 2: vpxor <r=%ymm14,<r19=%ymm10,<r19=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#14,<r20=reg256#12,<r20=reg256#12 # asm 2: vpxor <r=%ymm13,<r20=%ymm11,<r20=%ymm11 vpxor % ymm13, % ymm11, % ymm11 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#12,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r20=%ymm11,<r11=%ymm2,<r11=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r8 = r20 # asm 1: vmovapd <r20=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r20=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#14 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm13 vmovupd 256( % rsi), % ymm13 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a8=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # 
qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9 # asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#10,<r18=reg256#10 # asm 2: vpxor <r=%ymm14,<r18=%ymm9,<r18=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a8=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#14,<r19=reg256#11,<r19=reg256#11 # asm 2: vpxor 
<r=%ymm13,<r19=%ymm10,<r19=%ymm10 vpxor % ymm13, % ymm10, % ymm10 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#11,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r19=%ymm10,<r10=%ymm1,<r10=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r7 = r19 # asm 1: vmovapd <r19=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r19=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#14 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm13 vmovupd 224( % rsi), % ymm13 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a7=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 
vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9 # asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#14,<r18=reg256#10,<r18=reg256#10 # asm 2: vpxor <r=%ymm13,<r18=%ymm9,<r18=%ymm9 vpxor % ymm13, % ymm9, % ymm9 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r6 = r18 # asm 1: vmovapd <r18=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r18=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#14 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm13 vmovupd 192( % rsi), % ymm13 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a6=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = 
a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#14,<r17=reg256#9,<r17=reg256#9 # asm 2: vpxor <r=%ymm13,<r17=%ymm8,<r17=%ymm8 vpxor % ymm13, % ymm8, % ymm8 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r5 = r17 # asm 1: vmovapd <r17=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r17=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#14 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm13 vmovupd 160( % rsi), % ymm13 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a5=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#14,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm13,<r16=%ymm7,<r16=%ymm7 vpxor % ymm13, % ymm7, % ymm7 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r4 = r16 # asm 1: vmovapd <r16=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r16=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#14 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm13 vmovupd 128( % rsi), % ymm13 # qhasm: r = a4 & b0 # asm 1: vpand 
<a4=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a4=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 
# qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#14,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm13,<r15=%ymm6,<r15=%ymm6 vpxor % ymm13, % ymm6, % ymm6 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r3 = r15 # asm 1: vmovapd <r15=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r15=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#14 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm13 vmovupd 96( % rsi), % ymm13 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a3=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#15,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm14,<r3=%ymm6,<r3=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor 
<r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#14,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm13,<r14=%ymm5,<r14=%ymm5 vpxor % ymm13, % ymm5, % ymm5 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r2 = r14 # asm 1: vmovapd <r14=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r14=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#14 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm13 vmovupd 64( % rsi), % ymm13 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a2=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#15,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm14,<r2=%ymm5,<r2=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#15,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm14,<r3=%ymm6,<r3=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor 
<r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#14,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm13,<r13=%ymm4,<r13=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r1 = r13 # asm 1: vmovapd <r13=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r13=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#14 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm13 vmovupd 32( % rsi), % ymm13 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a1=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#15,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm14,<r1=%ymm4,<r1=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#15,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm14,<r2=%ymm5,<r2=%ymm5 vpxor % ymm14, % ymm5, % 
ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#15,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm14,<r3=%ymm6,<r3=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#14,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm13,<r12=%ymm3,<r12=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r3 ^= r12 # asm 1: vpxor <r12=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor 
<r12=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r0 = r12 # asm 1: vmovapd <r12=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r12=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#14 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm13 vmovupd 0( % rsi), % ymm13 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#14,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm13,<b0=%ymm0,>r=%ymm0 vpand % ymm13, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 32( % rdx), % ymm13, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 64( % rdx), % ymm13, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 96( % rdx), % ymm13, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 128( % rdx), % ymm13, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 160( % rdx), % ymm13, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 192( % rdx), % ymm13, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 224( % rdx), % ymm13, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 256( % rdx), % ymm13, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 288( % rdx), % ymm13, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor 
<r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 320( % rdx), % ymm13, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm0,<r10=%ymm1,<r10=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 352( % rdx), % ymm13, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm0,<r11=%ymm2,<r11=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#3,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm2,352(<input_0=%rdi) vmovupd % ymm2, 352( % rdi) # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#2,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm1,320(<input_0=%rdi) vmovupd % ymm1, 320( % rdi) # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#13,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm12,288(<input_0=%rdi) vmovupd % ymm12, 288( % rdi) # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#12,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm11,256(<input_0=%rdi) vmovupd % ymm11, 256( % rdi) # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#11,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm10,224(<input_0=%rdi) vmovupd % ymm10, 224( % rdi) # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#10,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm9,192(<input_0=%rdi) vmovupd % ymm9, 192( % rdi) # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#9,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm8,160(<input_0=%rdi) vmovupd % ymm8, 160( % rdi) # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#8,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm7,128(<input_0=%rdi) vmovupd % ymm7, 128( % rdi) # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#7,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm6,96(<input_0=%rdi) vmovupd % ymm6, 96( % rdi) # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#6,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm5,64(<input_0=%rdi) vmovupd % ymm5, 64( % rdi) # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#5,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm4,32(<input_0=%rdi) vmovupd % ymm4, 32( % rdi) # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#4,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm3,0(<input_0=%rdi) vmovupd % ymm3, 0( % rdi) # qhasm: return add % r11, % rsp ret
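# ----------------------------------------------------------------------
# Note (assumption, not part of the qhasm-generated source): the routine
# above reads as a bitsliced schoolbook product over GF(2)[x]: each
# 256-bit coefficient a_i of the first operand is ANDed with every
# coefficient b_j of the second and XORed into the partial product
# r_{i+j}, and the folds visible in the tail (r14 -> r2, r5 and
# r13 -> r1, r4) match a reduction modulo x^12 + x^3 + 1, i.e. a
# GF(2^12) multiply on 256 field elements at once.  A minimal C sketch
# of the same idea, with hypothetical array names:
#
#     /* 12 coefficients per element; each coefficient is one 256-bit */
#     /* lane stored as four uint64_t words                           */
#     uint64_t r[23][4] = {0};
#     for (int i = 0; i < 12; i++)
#         for (int j = 0; j < 12; j++)
#             for (int k = 0; k < 4; k++)
#                 r[i + j][k] ^= a[i][k] & b[j][k];
#     for (int t = 22; t >= 12; t--)     /* reduce mod x^12 + x^3 + 1 */
#         for (int k = 0; k < 4; k++) {
#             r[t - 12 + 3][k] ^= r[t][k];
#             r[t - 12][k]     ^= r[t][k];
#         }
#     /* r[0..11] now holds the product */
# ----------------------------------------------------------------------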
mktmansour/MKT-KSA-Geolocation-Security
264233
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864/avx2/transpose_64x256_sp_asm.S
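# ----------------------------------------------------------------------
# Note (assumption, drawn from reading the code below, not part of the
# generated source): transpose_64x256_sp_asm appears to transpose a
# 64x256 bit matrix in place, viewed as 64 rows of 32 bytes.  Rows are
# processed in panels of eight taken at a stride of 256 bytes (rows
# 0,8,...,56, then 1,9,...,57, and so on), and each panel goes through
# masked-swap stages of width 32, 16 and 8 bits built from the
# MASKn_0/MASKn_1 constant pairs; later parts of the file handle the
# finer widths.  Shape of the first pass, with a hypothetical helper:
#
#     for (int p = 0; p < 8; p++)      /* panel: rows p, p+8, ..., p+56 */
#         swap_stages_32_16_8(&m[32 * p]);
#
# The "# qhasm: ..." lines are the qhasm source the assembly was
# generated from; "# asm 1/2" give its register and AT&T expansions.
# ----------------------------------------------------------------------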
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x256_sp_asm CRYPTO_NAMESPACE(transpose_64x256_sp_asm) #define _transpose_64x256_sp_asm _CRYPTO_NAMESPACE(transpose_64x256_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 x0 # qhasm: reg256 x1 # qhasm: reg256 x2 # qhasm: reg256 x3 # qhasm: reg256 x4 # qhasm: reg256 x5 # qhasm: reg256 x6 # qhasm: reg256 x7 # qhasm: reg256 t0 # qhasm: reg256 t1 # qhasm: reg256 v00 # qhasm: reg256 v01 # qhasm: reg256 v10 # qhasm: reg256 v11 # qhasm: reg256 mask0 # qhasm: reg256 mask1 # qhasm: reg256 mask2 # qhasm: reg256 mask3 # qhasm: reg256 mask4 # qhasm: reg256 mask5 # qhasm: enter transpose_64x256_sp_asm .p2align 5 .global _transpose_64x256_sp_asm .global transpose_64x256_sp_asm _transpose_64x256_sp_asm: transpose_64x256_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem256[ MASK5_0 ] # asm 1: vmovapd MASK5_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK5_0(%rip),>mask0=%ymm0 vmovapd MASK5_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK5_1 ] # asm 1: vmovapd MASK5_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK5_1(%rip),>mask1=%ymm1 vmovapd MASK5_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK4_0 ] # asm 1: vmovapd MASK4_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK4_0(%rip),>mask2=%ymm2 vmovapd MASK4_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK4_1 ] # asm 1: vmovapd MASK4_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK4_1(%rip),>mask3=%ymm3 vmovapd MASK4_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK3_0 ] # asm 1: vmovapd MASK3_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK3_0(%rip),>mask4=%ymm4 vmovapd MASK3_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK3_1 ] # asm 1: vmovapd MASK3_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK3_1(%rip),>mask5=%ymm5 vmovapd MASK3_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 256(<input_0=%rdi),>x1=%ymm7 vmovupd 256( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 512 ] 
# asm 1: vmovupd 512(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 512(<input_0=%rdi),>x2=%ymm8 vmovupd 512( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 768 ] # asm 1: vmovupd 768(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 768(<input_0=%rdi),>x3=%ymm9 vmovupd 768( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1024(<input_0=%rdi),>x4=%ymm10 vmovupd 1024( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1280 ] # asm 1: vmovupd 1280(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1280(<input_0=%rdi),>x5=%ymm11 vmovupd 1280( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1536(<input_0=%rdi),>x6=%ymm12 vmovupd 1536( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1792(<input_0=%rdi),>x7=%ymm13 vmovupd 1792( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 
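# Note (assumption): each vpand / shift / shift / vpand quartet in this
# file is one masked swap, exchanging the w-bit halves of every 2w-bit
# block between a row pair (w = 32, 16, 8 across the stages above).  For
# one 64-bit lane, with hypothetical names, maskLO selecting the low w
# bits of each 2w-bit block and maskHI the high w bits:
#
#     new_x = (x & maskLO) | (y << w);     /* v00 | v10 */
#     new_y = (x >> w)     | (y & maskHI); /* v01 | v11 */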
# qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 256 ] = x1 # asm 1: vmovupd <x1=reg256#14,256(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,256(<input_0=%rdi) vmovupd % ymm13, 256( % rdi) # qhasm: mem256[ input_0 + 512 ] = x2 # asm 1: vmovupd <x2=reg256#15,512(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,512(<input_0=%rdi) vmovupd % ymm14, 512( % rdi) # qhasm: mem256[ input_0 + 768 ] = x3 # asm 1: vmovupd <x3=reg256#11,768(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,768(<input_0=%rdi) vmovupd % ymm10, 768( % rdi) # qhasm: mem256[ input_0 + 1024 ] = x4 # asm 1: vmovupd <x4=reg256#12,1024(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1024(<input_0=%rdi) vmovupd % ymm11, 1024( % rdi) # qhasm: mem256[ input_0 + 1280 ] = x5 # asm 1: vmovupd <x5=reg256#9,1280(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1280(<input_0=%rdi) vmovupd % ymm8, 1280( % rdi) # qhasm: mem256[ input_0 + 1536 ] = x6 # asm 1: vmovupd <x6=reg256#13,1536(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1536(<input_0=%rdi) vmovupd % ymm12, 1536( % rdi) # qhasm: mem256[ input_0 + 1792 ] = x7 # asm 1: vmovupd <x7=reg256#7,1792(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1792(<input_0=%rdi) vmovupd % ymm6, 1792( % rdi) # qhasm: x0 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6 vmovupd 32( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 544 ] # asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8 vmovupd 544( % rdi), % ymm8 # qhasm: x3 = mem256[ 
input_0 + 800 ] # asm 1: vmovupd 800(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 800(<input_0=%rdi),>x3=%ymm9 vmovupd 800( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1056 ] # asm 1: vmovupd 1056(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1056(<input_0=%rdi),>x4=%ymm10 vmovupd 1056( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1312 ] # asm 1: vmovupd 1312(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1312(<input_0=%rdi),>x5=%ymm11 vmovupd 1312( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1568(<input_0=%rdi),>x6=%ymm12 vmovupd 1568( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1824(<input_0=%rdi),>x7=%ymm13 vmovupd 1824( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: 
vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 
2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x 
v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 32 ] = x0 # asm 1: vmovupd <x0=reg256#10,32(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,32(<input_0=%rdi) vmovupd % ymm9, 32( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 544 ] = x2 # asm 1: vmovupd <x2=reg256#15,544(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,544(<input_0=%rdi) vmovupd % ymm14, 544( % rdi) # qhasm: mem256[ input_0 + 800 ] = x3 # asm 1: vmovupd <x3=reg256#11,800(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,800(<input_0=%rdi) vmovupd % ymm10, 800( % rdi) # qhasm: mem256[ input_0 + 1056 ] = x4 # asm 1: vmovupd <x4=reg256#12,1056(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1056(<input_0=%rdi) vmovupd % ymm11, 1056( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x5 # asm 1: vmovupd <x5=reg256#9,1312(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1312(<input_0=%rdi) vmovupd % ymm8, 1312( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x6 # asm 1: vmovupd <x6=reg256#13,1568(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1568(<input_0=%rdi) vmovupd % ymm12, 1568( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x7 # asm 1: vmovupd <x7=reg256#7,1824(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1824(<input_0=%rdi) vmovupd % ymm6, 1824( % rdi) # qhasm: x0 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 64(<input_0=%rdi),>x0=%ymm6 vmovupd 64( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 320(<input_0=%rdi),>x1=%ymm7 vmovupd 320( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 832 ] # asm 1: vmovupd 832(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 832(<input_0=%rdi),>x3=%ymm9 vmovupd 832( % rdi), % ymm9 # qhasm: 
x4 = mem256[ input_0 + 1088 ] # asm 1: vmovupd 1088(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1088(<input_0=%rdi),>x4=%ymm10 vmovupd 1088( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1344 ] # asm 1: vmovupd 1344(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1344(<input_0=%rdi),>x5=%ymm11 vmovupd 1344( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1600(<input_0=%rdi),>x6=%ymm12 vmovupd 1600( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1856(<input_0=%rdi),>x7=%ymm13 vmovupd 1856( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 
1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = 
x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 64 ] = x0 # asm 1: vmovupd <x0=reg256#10,64(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,64(<input_0=%rdi) vmovupd % ymm9, 64( % rdi) # qhasm: mem256[ input_0 + 320 ] = x1 # asm 1: vmovupd <x1=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 576 ] = x2 # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi) vmovupd % ymm14, 576( % rdi) # qhasm: mem256[ input_0 + 832 ] = x3 # asm 1: vmovupd <x3=reg256#11,832(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,832(<input_0=%rdi) vmovupd % ymm10, 832( % rdi) # qhasm: mem256[ input_0 + 1088 ] = x4 # asm 1: vmovupd <x4=reg256#12,1088(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1088(<input_0=%rdi) vmovupd % ymm11, 1088( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x5 # asm 1: vmovupd <x5=reg256#9,1344(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1344(<input_0=%rdi) vmovupd % ymm8, 1344( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x6 # asm 1: vmovupd <x6=reg256#13,1600(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1600(<input_0=%rdi) vmovupd % ymm12, 1600( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x7 # asm 1: vmovupd <x7=reg256#7,1856(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1856(<input_0=%rdi) vmovupd % ymm6, 1856( % rdi) # qhasm: x0 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6 vmovupd 96( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7 vmovupd 352( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8 vmovupd 608( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 864 ] # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9 vmovupd 864( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1120 ] # asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10 vmovupd 1120( % rdi), % 
ymm10 # qhasm: x5 = mem256[ input_0 + 1376 ] # asm 1: vmovupd 1376(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1376(<input_0=%rdi),>x5=%ymm11 vmovupd 1376( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1632 ] # asm 1: vmovupd 1632(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1632(<input_0=%rdi),>x6=%ymm12 vmovupd 1632( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1888(<input_0=%rdi),>x7=%ymm13 vmovupd 1888( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 
<< 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 
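# The generated code above and below repeats one fixed pattern at 32-byte
# offsets 64, 96, 128, ..., 224: eight 256-bit rows x0..x7 are loaded from
# input_0 at 256-byte stride, combined pairwise by three masked shift/or
# exchange stages, and stored back to the same offsets.  The stages appear
# to be the inner steps of a bit-matrix transpose: width 32 uses
# vpsllq/vpsrlq with mask0/mask1 on the pairs (x0,x4) (x1,x5) (x2,x6)
# (x3,x7); width 16 uses vpslld/vpsrld with mask2/mask3 on (x0,x2) (x1,x3)
# (x4,x6) (x5,x7); width 8 uses vpsllw/vpsrlw with mask4/mask5 on (x0,x1)
# (x2,x3) (x4,x5) (x6,x7).  Each stage computes
#
#     v00 = a & mask_lo        v10 = b << w
#     v01 = a >> w             v11 = b & mask_hi
#     a'  = v00 | v10          b'  = v01 | v11
#
# i.e. the high w bits of every 2w-bit field of a are exchanged with the
# low w bits of the corresponding field of b.  A minimal scalar C sketch of
# one such stage follows; the name exchange() and the explicit masks on the
# shifted values are illustrative only (the vector shifts vpslld/vpsllw
# already stay inside each lane, so the generated code does not need them):
#
#     #include <stdint.h>
#
#     /* Swap the high w bits of each 2w-bit field of *a with the
#        low w bits of the corresponding field of *b (w = 32, 16 or 8).
#        mask_lo has the low w bits of every 2w-bit field set,
#        mask_hi the high w bits. */
#     static void exchange(uint64_t *a, uint64_t *b,
#                          uint64_t mask_lo, uint64_t mask_hi, int w) {
#         uint64_t a_new = (*a & mask_lo) | ((*b << w) & mask_hi);
#         uint64_t b_new = ((*a >> w) & mask_lo) | (*b & mask_hi);
#         *a = a_new;
#         *b = b_new;
#     }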
vpand %ymm8, %ymm5, %ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12

# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8, %ymm7, %ymm15

# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8, %ymm6, %ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6

# qhasm: mem256[ input_0 + 96 ] = x0
# asm 1: vmovupd <x0=reg256#10,96(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,96(<input_0=%rdi)
vmovupd %ymm9, 96(%rdi)

# qhasm: mem256[ input_0 + 352 ] = x1
# asm 1: vmovupd <x1=reg256#14,352(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,352(<input_0=%rdi)
vmovupd %ymm13, 352(%rdi)

# qhasm: mem256[ input_0 + 608 ] = x2
# asm 1: vmovupd <x2=reg256#15,608(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,608(<input_0=%rdi)
vmovupd %ymm14, 608(%rdi)

# qhasm: mem256[ input_0 + 864 ] = x3
# asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi)
vmovupd %ymm10, 864(%rdi)

# qhasm: mem256[ input_0 + 1120 ] = x4
# asm 1: vmovupd <x4=reg256#12,1120(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1120(<input_0=%rdi)
vmovupd %ymm11, 1120(%rdi)

# qhasm: mem256[ input_0 + 1376 ] = x5
# asm 1: vmovupd <x5=reg256#9,1376(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1376(<input_0=%rdi)
vmovupd %ymm8, 1376(%rdi)

# qhasm: mem256[ input_0 + 1632 ] = x6
# asm 1: vmovupd <x6=reg256#13,1632(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1632(<input_0=%rdi)
vmovupd %ymm12, 1632(%rdi)

# qhasm: mem256[ input_0 + 1888 ] = x7
# asm 1: vmovupd <x7=reg256#7,1888(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1888(<input_0=%rdi)
vmovupd %ymm6, 1888(%rdi)

# qhasm: x0 = mem256[ input_0 + 128 ]
# asm 1: vmovupd 128(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 128(<input_0=%rdi),>x0=%ymm6
vmovupd 128(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 384 ]
# asm 1: vmovupd 384(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 384(<input_0=%rdi),>x1=%ymm7
vmovupd 384(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 640 ]
# asm 1: vmovupd 640(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 640(<input_0=%rdi),>x2=%ymm8
vmovupd 640(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 896 ]
# asm 1: vmovupd 896(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 896(<input_0=%rdi),>x3=%ymm9
vmovupd 896(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 1152 ]
# asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10
vmovupd 1152(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 1408 ]
# asm 1: vmovupd 1408(<input_0=int64#1),>x5=reg256#12
# asm 2:
vmovupd 1408(<input_0=%rdi),>x5=%ymm11 vmovupd 1408( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1664 ] # asm 1: vmovupd 1664(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1664(<input_0=%rdi),>x6=%ymm12 vmovupd 1664( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1920(<input_0=%rdi),>x7=%ymm13 vmovupd 1920( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, 
% ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 
1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor 
<v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 128 ] = x0 # asm 1: vmovupd <x0=reg256#10,128(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,128(<input_0=%rdi) vmovupd % ymm9, 128( % rdi) # qhasm: mem256[ input_0 + 384 ] = x1 # asm 1: vmovupd <x1=reg256#14,384(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,384(<input_0=%rdi) vmovupd % ymm13, 384( % rdi) # qhasm: mem256[ input_0 + 640 ] = x2 # asm 1: vmovupd <x2=reg256#15,640(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,640(<input_0=%rdi) vmovupd % ymm14, 640( % rdi) # qhasm: mem256[ input_0 + 896 ] = x3 # asm 1: vmovupd <x3=reg256#11,896(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,896(<input_0=%rdi) vmovupd % ymm10, 896( % rdi) # qhasm: mem256[ input_0 + 1152 ] = x4 # asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi) vmovupd % ymm11, 1152( % rdi) # qhasm: mem256[ input_0 + 1408 ] = x5 # asm 1: vmovupd <x5=reg256#9,1408(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1408(<input_0=%rdi) vmovupd % ymm8, 1408( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x6 # asm 1: vmovupd <x6=reg256#13,1664(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1664(<input_0=%rdi) vmovupd % ymm12, 1664( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x7 # asm 1: vmovupd <x7=reg256#7,1920(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1920(<input_0=%rdi) vmovupd % ymm6, 1920( % rdi) # qhasm: x0 = mem256[ input_0 + 160 ] # asm 1: vmovupd 160(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 160(<input_0=%rdi),>x0=%ymm6 vmovupd 160( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 416 ] # asm 1: vmovupd 416(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 416(<input_0=%rdi),>x1=%ymm7 vmovupd 416( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 672 ] # asm 1: vmovupd 672(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 672(<input_0=%rdi),>x2=%ymm8 vmovupd 672( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 928 ] # asm 1: vmovupd 928(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 928(<input_0=%rdi),>x3=%ymm9 vmovupd 928( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1184 ] # asm 1: vmovupd 1184(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1184(<input_0=%rdi),>x4=%ymm10 vmovupd 1184( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1440 ] # asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11 vmovupd 1440( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1696 ] # asm 1: vmovupd 
1696(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1696(<input_0=%rdi),>x6=%ymm12 vmovupd 1696( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1952(<input_0=%rdi),>x7=%ymm13 vmovupd 1952( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq 
$32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % 
ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor 
<v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 160 ] = x0 # asm 1: vmovupd <x0=reg256#10,160(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,160(<input_0=%rdi) vmovupd % ymm9, 160( % rdi) # qhasm: mem256[ input_0 + 416 ] = x1 # asm 1: vmovupd <x1=reg256#14,416(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,416(<input_0=%rdi) vmovupd % ymm13, 416( % rdi) # qhasm: mem256[ input_0 + 672 ] = x2 # asm 1: vmovupd <x2=reg256#15,672(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,672(<input_0=%rdi) vmovupd % ymm14, 672( % rdi) # qhasm: mem256[ input_0 + 928 ] = x3 # asm 1: vmovupd <x3=reg256#11,928(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,928(<input_0=%rdi) vmovupd % ymm10, 928( % rdi) # qhasm: mem256[ input_0 + 1184 ] = x4 # asm 1: vmovupd <x4=reg256#12,1184(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1184(<input_0=%rdi) vmovupd % ymm11, 1184( % rdi) # qhasm: mem256[ input_0 + 1440 ] = x5 # asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi) vmovupd % ymm8, 1440( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x6 # asm 1: vmovupd <x6=reg256#13,1696(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1696(<input_0=%rdi) vmovupd % ymm12, 1696( % rdi) # qhasm: mem256[ input_0 + 1952 ] = x7 # asm 1: vmovupd <x7=reg256#7,1952(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1952(<input_0=%rdi) vmovupd % ymm6, 1952( % rdi) # qhasm: x0 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 192(<input_0=%rdi),>x0=%ymm6 vmovupd 192( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 448 ] # asm 1: vmovupd 448(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 448(<input_0=%rdi),>x1=%ymm7 vmovupd 448( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 704 ] # asm 1: vmovupd 704(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 704(<input_0=%rdi),>x2=%ymm8 vmovupd 704( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 960 ] # asm 1: vmovupd 960(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 960(<input_0=%rdi),>x3=%ymm9 vmovupd 960( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1216 ] # asm 1: vmovupd 1216(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1216(<input_0=%rdi),>x4=%ymm10 vmovupd 1216( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1472 ] # asm 1: vmovupd 1472(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1472(<input_0=%rdi),>x5=%ymm11 vmovupd 1472( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1728 ] # asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12 vmovupd 1728( % rdi), 
% ymm12 # qhasm: x7 = mem256[ input_0 + 1984 ] # asm 1: vmovupd 1984(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1984(<input_0=%rdi),>x7=%ymm13 vmovupd 1984( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand 
<x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # 
asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: 
v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 192 ] = x0 # asm 1: vmovupd <x0=reg256#10,192(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,192(<input_0=%rdi) vmovupd % ymm9, 192( % rdi) # qhasm: mem256[ input_0 + 448 ] = x1 # asm 1: vmovupd <x1=reg256#14,448(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,448(<input_0=%rdi) vmovupd % ymm13, 448( % rdi) # qhasm: mem256[ input_0 + 704 ] = x2 # asm 1: vmovupd <x2=reg256#15,704(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,704(<input_0=%rdi) vmovupd % ymm14, 704( % rdi) # qhasm: mem256[ input_0 + 960 ] = x3 # asm 1: vmovupd <x3=reg256#11,960(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,960(<input_0=%rdi) vmovupd % ymm10, 960( % rdi) # qhasm: mem256[ input_0 + 1216 ] = x4 # asm 1: vmovupd <x4=reg256#12,1216(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1216(<input_0=%rdi) vmovupd % ymm11, 1216( % rdi) # qhasm: mem256[ input_0 + 1472 ] = x5 # asm 1: vmovupd <x5=reg256#9,1472(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1472(<input_0=%rdi) vmovupd % ymm8, 1472( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6 # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi) vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1984 ] = x7 # asm 1: vmovupd <x7=reg256#7,1984(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1984(<input_0=%rdi) vmovupd % ymm6, 1984( % rdi) # qhasm: x0 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 224(<input_0=%rdi),>x0=%ymm6 vmovupd 224( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 480 ] # asm 1: vmovupd 480(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 480(<input_0=%rdi),>x1=%ymm7 vmovupd 480( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 736 ] # asm 1: vmovupd 736(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 736(<input_0=%rdi),>x2=%ymm8 vmovupd 736( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 992 ] # asm 1: vmovupd 992(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 992(<input_0=%rdi),>x3=%ymm9 vmovupd 992( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1248 ] # asm 1: vmovupd 1248(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1248(<input_0=%rdi),>x4=%ymm10 vmovupd 1248( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1504 ] # asm 1: vmovupd 1504(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1504(<input_0=%rdi),>x5=%ymm11 vmovupd 1504( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1760 ] # asm 1: vmovupd 1760(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1760(<input_0=%rdi),>x6=%ymm12 vmovupd 1760( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 2016 ] # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 
vmovupd 2016(%rdi),%ymm13     # x7 = mem256[ input_0 + 2016 ]

vpand %ymm6,%ymm0,%ymm14      # v00 = x0 & mask0
vpsllq $32,%ymm10,%ymm15      # 4x v10 = x4 << 32
vpsrlq $32,%ymm6,%ymm6        # 4x v01 = x0 unsigned>> 32
vpand %ymm10,%ymm1,%ymm10     # v11 = x4 & mask1
vpor %ymm14,%ymm15,%ymm14     # x0 = v00 | v10
vpor %ymm6,%ymm10,%ymm6       # x4 = v01 | v11

vpand %ymm7,%ymm0,%ymm10      # v00 = x1 & mask0
vpsllq $32,%ymm11,%ymm15      # 4x v10 = x5 << 32
vpsrlq $32,%ymm7,%ymm7        # 4x v01 = x1 unsigned>> 32
vpand %ymm11,%ymm1,%ymm11     # v11 = x5 & mask1
vpor %ymm10,%ymm15,%ymm10     # x1 = v00 | v10
vpor %ymm7,%ymm11,%ymm7       # x5 = v01 | v11

vpand %ymm8,%ymm0,%ymm11      # v00 = x2 & mask0
vpsllq $32,%ymm12,%ymm15      # 4x v10 = x6 << 32
vpsrlq $32,%ymm8,%ymm8        # 4x v01 = x2 unsigned>> 32
vpand %ymm12,%ymm1,%ymm12     # v11 = x6 & mask1
vpor %ymm11,%ymm15,%ymm11     # x2 = v00 | v10
vpor %ymm8,%ymm12,%ymm8       # x6 = v01 | v11

vpand %ymm9,%ymm0,%ymm0       # v00 = x3 & mask0
vpsllq $32,%ymm13,%ymm12      # 4x v10 = x7 << 32
vpsrlq $32,%ymm9,%ymm9        # 4x v01 = x3 unsigned>> 32
vpand %ymm13,%ymm1,%ymm1      # v11 = x7 & mask1
vpor %ymm0,%ymm12,%ymm0       # x3 = v00 | v10
vpor %ymm9,%ymm1,%ymm1        # x7 = v01 | v11

vpand %ymm14,%ymm2,%ymm9      # v00 = x0 & mask2
vpslld $16,%ymm11,%ymm12      # 8x v10 = x2 << 16
vpsrld $16,%ymm14,%ymm13      # 8x v01 = x0 unsigned>> 16
vpand %ymm11,%ymm3,%ymm11     # v11 = x2 & mask3
vpor %ymm9,%ymm12,%ymm9       # x0 = v00 | v10
vpor %ymm13,%ymm11,%ymm11     # x2 = v01 | v11

vpand %ymm10,%ymm2,%ymm12     # v00 = x1 & mask2
vpslld $16,%ymm0,%ymm13       # 8x v10 = x3 << 16
vpsrld $16,%ymm10,%ymm10      # 8x v01 = x1 unsigned>> 16
vpand %ymm0,%ymm3,%ymm0       # v11 = x3 & mask3
vpor %ymm12,%ymm13,%ymm12     # x1 = v00 | v10
vpor %ymm10,%ymm0,%ymm0       # x3 = v01 | v11

vpand %ymm6,%ymm2,%ymm10      # v00 = x4 & mask2
vpslld $16,%ymm8,%ymm13       # 8x v10 = x6 << 16
vpsrld $16,%ymm6,%ymm6        # 8x v01 = x4 unsigned>> 16
vpand %ymm8,%ymm3,%ymm8       # v11 = x6 & mask3
vpor %ymm10,%ymm13,%ymm10     # x4 = v00 | v10
vpor %ymm6,%ymm8,%ymm6        # x6 = v01 | v11

vpand %ymm7,%ymm2,%ymm2       # v00 = x5 & mask2
vpslld $16,%ymm1,%ymm8        # 8x v10 = x7 << 16
vpsrld $16,%ymm7,%ymm7        # 8x v01 = x5 unsigned>> 16
vpand %ymm1,%ymm3,%ymm1       # v11 = x7 & mask3
vpor %ymm2,%ymm8,%ymm2        # x5 = v00 | v10
vpor %ymm7,%ymm1,%ymm1        # x7 = v01 | v11

vpand %ymm9,%ymm4,%ymm3       # v00 = x0 & mask4
vpsllw $8,%ymm12,%ymm7        # 16x v10 = x1 << 8
vpsrlw $8,%ymm9,%ymm8         # 16x v01 = x0 unsigned>> 8
vpand %ymm12,%ymm5,%ymm9      # v11 = x1 & mask5
vpor %ymm3,%ymm7,%ymm3        # x0 = v00 | v10
vpor %ymm8,%ymm9,%ymm7        # x1 = v01 | v11

vpand %ymm11,%ymm4,%ymm8      # v00 = x2 & mask4
vpsllw $8,%ymm0,%ymm9         # 16x v10 = x3 << 8
vpsrlw $8,%ymm11,%ymm11       # 16x v01 = x2 unsigned>> 8
vpand %ymm0,%ymm5,%ymm0       # v11 = x3 & mask5
vpor %ymm8,%ymm9,%ymm8        # x2 = v00 | v10
vpor %ymm11,%ymm0,%ymm0       # x3 = v01 | v11

vpand %ymm10,%ymm4,%ymm9      # v00 = x4 & mask4
vpsllw $8,%ymm2,%ymm11        # 16x v10 = x5 << 8
vpsrlw $8,%ymm10,%ymm10      # 16x v01 = x4 unsigned>> 8
vpand %ymm2,%ymm5,%ymm2       # v11 = x5 & mask5
vpor %ymm9,%ymm11,%ymm9       # x4 = v00 | v10
vpor %ymm10,%ymm2,%ymm2       # x5 = v01 | v11

vpand %ymm6,%ymm4,%ymm4       # v00 = x6 & mask4
vpsllw $8,%ymm1,%ymm10        # 16x v10 = x7 << 8
vpsrlw $8,%ymm6,%ymm6         # 16x v01 = x6 unsigned>> 8
vpand %ymm1,%ymm5,%ymm1       # v11 = x7 & mask5
vpor %ymm4,%ymm10,%ymm4       # x6 = v00 | v10
vpor %ymm6,%ymm1,%ymm1        # x7 = v01 | v11

vmovupd %ymm3,224(%rdi)       # mem256[ input_0 + 224 ] = x0
vmovupd %ymm7,480(%rdi)       # mem256[ input_0 + 480 ] = x1
vmovupd %ymm8,736(%rdi)       # mem256[ input_0 + 736 ] = x2
vmovupd %ymm0,992(%rdi)       # mem256[ input_0 + 992 ] = x3
vmovupd %ymm9,1248(%rdi)      # mem256[ input_0 + 1248 ] = x4
vmovupd %ymm2,1504(%rdi)      # mem256[ input_0 + 1504 ] = x5
vmovupd %ymm4,1760(%rdi)      # mem256[ input_0 + 1760 ] = x6
vmovupd %ymm1,2016(%rdi)      # mem256[ input_0 + 2016 ] = x7

vmovapd MASK2_0(%rip),%ymm0   # mask0 aligned= mem256[ MASK2_0 ]
vmovapd MASK2_1(%rip),%ymm1   # mask1 aligned= mem256[ MASK2_1 ]
vmovapd MASK1_0(%rip),%ymm2   # mask2 aligned= mem256[ MASK1_0 ]
vmovapd MASK1_1(%rip),%ymm3   # mask3 aligned= mem256[ MASK1_1 ]
vmovapd MASK0_0(%rip),%ymm4   # mask4 aligned= mem256[ MASK0_0 ]
vmovapd MASK0_1(%rip),%ymm5   # mask5 aligned= mem256[ MASK0_1 ]
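#
# The eight stores above finish a masked-swap pass at field widths 32, 16
# and 8 over rows taken at stride 256, and the vmovapd group reloads
# mask0..mask5 with MASK2_*, MASK1_* and MASK0_*: the contiguous rows that
# follow are processed the same way at widths 4, 2 and 1.  This is the
# pairwise field exchange used in bit-matrix transposes.  A minimal C
# sketch of one exchange per 64-bit lane (illustrative names, not part of
# this file; assumes <stdint.h>):
#
#   /* swap the width-w fields selected by m0 (low) / m1 (high) */
#   static void swap_fields(uint64_t *a, uint64_t *b,
#                           uint64_t m0, uint64_t m1, unsigned w) {
#       uint64_t t0 = (*a & m0) | ((*b & m0) << w);  /* x  = v00 | v10 */
#       uint64_t t1 = ((*a & m1) >> w) | (*b & m1);  /* x' = v01 | v11 */
#       *a = t0;
#       *b = t1;
#   }
#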
vmovupd 0(%rdi),%ymm6         # x0 = mem256[ input_0 + 0 ]
vmovupd 32(%rdi),%ymm7        # x1 = mem256[ input_0 + 32 ]
vmovupd 64(%rdi),%ymm8        # x2 = mem256[ input_0 + 64 ]
vmovupd 96(%rdi),%ymm9        # x3 = mem256[ input_0 + 96 ]
vmovupd 128(%rdi),%ymm10      # x4 = mem256[ input_0 + 128 ]
vmovupd 160(%rdi),%ymm11      # x5 = mem256[ input_0 + 160 ]
vmovupd 192(%rdi),%ymm12      # x6 = mem256[ input_0 + 192 ]
vmovupd 224(%rdi),%ymm13      # x7 = mem256[ input_0 + 224 ]

vpand %ymm6,%ymm0,%ymm14      # v00 = x0 & mask0
vpand %ymm10,%ymm0,%ymm15     # v10 = x4 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm6,%ymm1,%ymm6       # v01 = x0 & mask1
vpand %ymm10,%ymm1,%ymm10     # v11 = x4 & mask1
vpsrlq $4,%ymm6,%ymm6         # 4x v01 unsigned>>= 4
vpor %ymm14,%ymm15,%ymm14     # x0 = v00 | v10
vpor %ymm6,%ymm10,%ymm6       # x4 = v01 | v11

vpand %ymm7,%ymm0,%ymm10      # v00 = x1 & mask0
vpand %ymm11,%ymm0,%ymm15     # v10 = x5 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm7,%ymm1,%ymm7       # v01 = x1 & mask1
vpand %ymm11,%ymm1,%ymm11     # v11 = x5 & mask1
vpsrlq $4,%ymm7,%ymm7         # 4x v01 unsigned>>= 4
vpor %ymm10,%ymm15,%ymm10     # x1 = v00 | v10
vpor %ymm7,%ymm11,%ymm7       # x5 = v01 | v11

vpand %ymm8,%ymm0,%ymm11      # v00 = x2 & mask0
vpand %ymm12,%ymm0,%ymm15     # v10 = x6 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm8,%ymm1,%ymm8       # v01 = x2 & mask1
vpand %ymm12,%ymm1,%ymm12     # v11 = x6 & mask1
vpsrlq $4,%ymm8,%ymm8         # 4x v01 unsigned>>= 4
vpor %ymm11,%ymm15,%ymm11     # x2 = v00 | v10
vpor %ymm8,%ymm12,%ymm8       # x6 = v01 | v11

vpand %ymm9,%ymm0,%ymm12      # v00 = x3 & mask0
vpand %ymm13,%ymm0,%ymm15     # v10 = x7 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm9,%ymm1,%ymm9       # v01 = x3 & mask1
vpand %ymm13,%ymm1,%ymm13     # v11 = x7 & mask1
vpsrlq $4,%ymm9,%ymm9         # 4x v01 unsigned>>= 4
vpor %ymm12,%ymm15,%ymm12     # x3 = v00 | v10
vpor %ymm9,%ymm13,%ymm9       # x7 = v01 | v11

vpand %ymm14,%ymm2,%ymm13     # v00 = x0 & mask2
vpand %ymm11,%ymm2,%ymm15     # v10 = x2 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm14,%ymm3,%ymm14     # v01 = x0 & mask3
vpand %ymm11,%ymm3,%ymm11     # v11 = x2 & mask3
vpsrlq $2,%ymm14,%ymm14       # 4x v01 unsigned>>= 2
vpor %ymm13,%ymm15,%ymm13     # x0 = v00 | v10
vpor %ymm14,%ymm11,%ymm11     # x2 = v01 | v11

vpand %ymm10,%ymm2,%ymm14     # v00 = x1 & mask2
vpand %ymm12,%ymm2,%ymm15     # v10 = x3 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm10,%ymm3,%ymm10     # v01 = x1 & mask3
vpand %ymm12,%ymm3,%ymm12     # v11 = x3 & mask3
vpsrlq $2,%ymm10,%ymm10       # 4x v01 unsigned>>= 2
vpor %ymm14,%ymm15,%ymm14     # x1 = v00 | v10
vpor %ymm10,%ymm12,%ymm10     # x3 = v01 | v11

vpand %ymm6,%ymm2,%ymm12      # v00 = x4 & mask2
vpand %ymm8,%ymm2,%ymm15      # v10 = x6 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm6,%ymm3,%ymm6       # v01 = x4 & mask3
vpand %ymm8,%ymm3,%ymm8       # v11 = x6 & mask3
vpsrlq $2,%ymm6,%ymm6         # 4x v01 unsigned>>= 2
vpor %ymm12,%ymm15,%ymm12     # x4 = v00 | v10
vpor %ymm6,%ymm8,%ymm6        # x6 = v01 | v11

vpand %ymm7,%ymm2,%ymm8       # v00 = x5 & mask2
vpand %ymm9,%ymm2,%ymm15      # v10 = x7 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm7,%ymm3,%ymm7       # v01 = x5 & mask3
vpand %ymm9,%ymm3,%ymm9       # v11 = x7 & mask3
vpsrlq $2,%ymm7,%ymm7         # 4x v01 unsigned>>= 2
vpor %ymm8,%ymm15,%ymm8       # x5 = v00 | v10
vpor %ymm7,%ymm9,%ymm7        # x7 = v01 | v11

vpand %ymm13,%ymm4,%ymm9      # v00 = x0 & mask4
vpand %ymm14,%ymm4,%ymm15     # v10 = x1 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm13,%ymm5,%ymm13     # v01 = x0 & mask5
vpand %ymm14,%ymm5,%ymm14     # v11 = x1 & mask5
vpsrlq $1,%ymm13,%ymm13       # 4x v01 unsigned>>= 1
vpor %ymm9,%ymm15,%ymm9       # x0 = v00 | v10
vpor %ymm13,%ymm14,%ymm13     # x1 = v01 | v11

vpand %ymm11,%ymm4,%ymm14     # v00 = x2 & mask4
vpand %ymm10,%ymm4,%ymm15     # v10 = x3 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm11,%ymm5,%ymm11     # v01 = x2 & mask5
vpand %ymm10,%ymm5,%ymm10     # v11 = x3 & mask5
vpsrlq $1,%ymm11,%ymm11       # 4x v01 unsigned>>= 1
vpor %ymm14,%ymm15,%ymm14     # x2 = v00 | v10
vpor %ymm11,%ymm10,%ymm10     # x3 = v01 | v11

vpand %ymm12,%ymm4,%ymm11     # v00 = x4 & mask4
vpand %ymm8,%ymm4,%ymm15      # v10 = x5 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm12,%ymm5,%ymm12     # v01 = x4 & mask5
vpand %ymm8,%ymm5,%ymm8       # v11 = x5 & mask5
vpsrlq $1,%ymm12,%ymm12       # 4x v01 unsigned>>= 1
vpor %ymm11,%ymm15,%ymm11     # x4 = v00 | v10
vpor %ymm12,%ymm8,%ymm8       # x5 = v01 | v11

vpand %ymm6,%ymm4,%ymm12      # v00 = x6 & mask4
vpand %ymm7,%ymm4,%ymm15      # v10 = x7 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm6,%ymm5,%ymm6       # v01 = x6 & mask5
vpand %ymm7,%ymm5,%ymm7       # v11 = x7 & mask5
vpsrlq $1,%ymm6,%ymm6         # 4x v01 unsigned>>= 1
vpor %ymm12,%ymm15,%ymm12     # x6 = v00 | v10
vpor %ymm6,%ymm7,%ymm6        # x7 = v01 | v11

vmovupd %ymm9,0(%rdi)         # mem256[ input_0 + 0 ] = x0
vmovupd %ymm13,32(%rdi)       # mem256[ input_0 + 32 ] = x1
vmovupd %ymm14,64(%rdi)       # mem256[ input_0 + 64 ] = x2
vmovupd %ymm10,96(%rdi)       # mem256[ input_0 + 96 ] = x3
vmovupd %ymm11,128(%rdi)      # mem256[ input_0 + 128 ] = x4
vmovupd %ymm8,160(%rdi)       # mem256[ input_0 + 160 ] = x5
vmovupd %ymm12,192(%rdi)      # mem256[ input_0 + 192 ] = x6
vmovupd %ymm6,224(%rdi)       # mem256[ input_0 + 224 ] = x7
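#
# The three 256-byte rows that follow (base offsets 256, 512 and 768) repeat
# the width-4/2/1 schedule above with an identical register allocation; only
# the vmovupd load/store offsets change from row to row.
#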
vmovupd 256(%rdi),%ymm6       # x0 = mem256[ input_0 + 256 ]
vmovupd 288(%rdi),%ymm7       # x1 = mem256[ input_0 + 288 ]
vmovupd 320(%rdi),%ymm8       # x2 = mem256[ input_0 + 320 ]
vmovupd 352(%rdi),%ymm9       # x3 = mem256[ input_0 + 352 ]
vmovupd 384(%rdi),%ymm10      # x4 = mem256[ input_0 + 384 ]
vmovupd 416(%rdi),%ymm11      # x5 = mem256[ input_0 + 416 ]
vmovupd 448(%rdi),%ymm12      # x6 = mem256[ input_0 + 448 ]
vmovupd 480(%rdi),%ymm13      # x7 = mem256[ input_0 + 480 ]

vpand %ymm6,%ymm0,%ymm14      # v00 = x0 & mask0
vpand %ymm10,%ymm0,%ymm15     # v10 = x4 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm6,%ymm1,%ymm6       # v01 = x0 & mask1
vpand %ymm10,%ymm1,%ymm10     # v11 = x4 & mask1
vpsrlq $4,%ymm6,%ymm6         # 4x v01 unsigned>>= 4
vpor %ymm14,%ymm15,%ymm14     # x0 = v00 | v10
vpor %ymm6,%ymm10,%ymm6       # x4 = v01 | v11

vpand %ymm7,%ymm0,%ymm10      # v00 = x1 & mask0
vpand %ymm11,%ymm0,%ymm15     # v10 = x5 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm7,%ymm1,%ymm7       # v01 = x1 & mask1
vpand %ymm11,%ymm1,%ymm11     # v11 = x5 & mask1
vpsrlq $4,%ymm7,%ymm7         # 4x v01 unsigned>>= 4
vpor %ymm10,%ymm15,%ymm10     # x1 = v00 | v10
vpor %ymm7,%ymm11,%ymm7       # x5 = v01 | v11

vpand %ymm8,%ymm0,%ymm11      # v00 = x2 & mask0
vpand %ymm12,%ymm0,%ymm15     # v10 = x6 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm8,%ymm1,%ymm8       # v01 = x2 & mask1
vpand %ymm12,%ymm1,%ymm12     # v11 = x6 & mask1
vpsrlq $4,%ymm8,%ymm8         # 4x v01 unsigned>>= 4
vpor %ymm11,%ymm15,%ymm11     # x2 = v00 | v10
vpor %ymm8,%ymm12,%ymm8       # x6 = v01 | v11

vpand %ymm9,%ymm0,%ymm12      # v00 = x3 & mask0
vpand %ymm13,%ymm0,%ymm15     # v10 = x7 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm9,%ymm1,%ymm9       # v01 = x3 & mask1
vpand %ymm13,%ymm1,%ymm13     # v11 = x7 & mask1
vpsrlq $4,%ymm9,%ymm9         # 4x v01 unsigned>>= 4
vpor %ymm12,%ymm15,%ymm12     # x3 = v00 | v10
vpor %ymm9,%ymm13,%ymm9       # x7 = v01 | v11

vpand %ymm14,%ymm2,%ymm13     # v00 = x0 & mask2
vpand %ymm11,%ymm2,%ymm15     # v10 = x2 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm14,%ymm3,%ymm14     # v01 = x0 & mask3
vpand %ymm11,%ymm3,%ymm11     # v11 = x2 & mask3
vpsrlq $2,%ymm14,%ymm14       # 4x v01 unsigned>>= 2
vpor %ymm13,%ymm15,%ymm13     # x0 = v00 | v10
vpor %ymm14,%ymm11,%ymm11     # x2 = v01 | v11

vpand %ymm10,%ymm2,%ymm14     # v00 = x1 & mask2
vpand %ymm12,%ymm2,%ymm15     # v10 = x3 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm10,%ymm3,%ymm10     # v01 = x1 & mask3
vpand %ymm12,%ymm3,%ymm12     # v11 = x3 & mask3
vpsrlq $2,%ymm10,%ymm10       # 4x v01 unsigned>>= 2
vpor %ymm14,%ymm15,%ymm14     # x1 = v00 | v10
vpor %ymm10,%ymm12,%ymm10     # x3 = v01 | v11

vpand %ymm6,%ymm2,%ymm12      # v00 = x4 & mask2
vpand %ymm8,%ymm2,%ymm15      # v10 = x6 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm6,%ymm3,%ymm6       # v01 = x4 & mask3
vpand %ymm8,%ymm3,%ymm8       # v11 = x6 & mask3
vpsrlq $2,%ymm6,%ymm6         # 4x v01 unsigned>>= 2
vpor %ymm12,%ymm15,%ymm12     # x4 = v00 | v10
vpor %ymm6,%ymm8,%ymm6        # x6 = v01 | v11

vpand %ymm7,%ymm2,%ymm8       # v00 = x5 & mask2
vpand %ymm9,%ymm2,%ymm15      # v10 = x7 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm7,%ymm3,%ymm7       # v01 = x5 & mask3
vpand %ymm9,%ymm3,%ymm9       # v11 = x7 & mask3
vpsrlq $2,%ymm7,%ymm7         # 4x v01 unsigned>>= 2
vpor %ymm8,%ymm15,%ymm8       # x5 = v00 | v10
vpor %ymm7,%ymm9,%ymm7        # x7 = v01 | v11

vpand %ymm13,%ymm4,%ymm9      # v00 = x0 & mask4
vpand %ymm14,%ymm4,%ymm15     # v10 = x1 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm13,%ymm5,%ymm13     # v01 = x0 & mask5
vpand %ymm14,%ymm5,%ymm14     # v11 = x1 & mask5
vpsrlq $1,%ymm13,%ymm13       # 4x v01 unsigned>>= 1
vpor %ymm9,%ymm15,%ymm9       # x0 = v00 | v10
vpor %ymm13,%ymm14,%ymm13     # x1 = v01 | v11

vpand %ymm11,%ymm4,%ymm14     # v00 = x2 & mask4
vpand %ymm10,%ymm4,%ymm15     # v10 = x3 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm11,%ymm5,%ymm11     # v01 = x2 & mask5
vpand %ymm10,%ymm5,%ymm10     # v11 = x3 & mask5
vpsrlq $1,%ymm11,%ymm11       # 4x v01 unsigned>>= 1
vpor %ymm14,%ymm15,%ymm14     # x2 = v00 | v10
vpor %ymm11,%ymm10,%ymm10     # x3 = v01 | v11

vpand %ymm12,%ymm4,%ymm11     # v00 = x4 & mask4
vpand %ymm8,%ymm4,%ymm15      # v10 = x5 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm12,%ymm5,%ymm12     # v01 = x4 & mask5
vpand %ymm8,%ymm5,%ymm8       # v11 = x5 & mask5
vpsrlq $1,%ymm12,%ymm12       # 4x v01 unsigned>>= 1
vpor %ymm11,%ymm15,%ymm11     # x4 = v00 | v10
vpor %ymm12,%ymm8,%ymm8       # x5 = v01 | v11

vpand %ymm6,%ymm4,%ymm12      # v00 = x6 & mask4
vpand %ymm7,%ymm4,%ymm15      # v10 = x7 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm6,%ymm5,%ymm6       # v01 = x6 & mask5
vpand %ymm7,%ymm5,%ymm7       # v11 = x7 & mask5
vpsrlq $1,%ymm6,%ymm6         # 4x v01 unsigned>>= 1
vpor %ymm12,%ymm15,%ymm12     # x6 = v00 | v10
vpor %ymm6,%ymm7,%ymm6        # x7 = v01 | v11

vmovupd %ymm9,256(%rdi)       # mem256[ input_0 + 256 ] = x0
vmovupd %ymm13,288(%rdi)      # mem256[ input_0 + 288 ] = x1
vmovupd %ymm14,320(%rdi)      # mem256[ input_0 + 320 ] = x2
vmovupd %ymm10,352(%rdi)      # mem256[ input_0 + 352 ] = x3
vmovupd %ymm11,384(%rdi)      # mem256[ input_0 + 384 ] = x4
vmovupd %ymm8,416(%rdi)       # mem256[ input_0 + 416 ] = x5
vmovupd %ymm12,448(%rdi)      # mem256[ input_0 + 448 ] = x6
vmovupd %ymm6,480(%rdi)       # mem256[ input_0 + 480 ] = x7
vmovupd 512(%rdi),%ymm6       # x0 = mem256[ input_0 + 512 ]
vmovupd 544(%rdi),%ymm7       # x1 = mem256[ input_0 + 544 ]
vmovupd 576(%rdi),%ymm8       # x2 = mem256[ input_0 + 576 ]
vmovupd 608(%rdi),%ymm9       # x3 = mem256[ input_0 + 608 ]
vmovupd 640(%rdi),%ymm10      # x4 = mem256[ input_0 + 640 ]
vmovupd 672(%rdi),%ymm11      # x5 = mem256[ input_0 + 672 ]
vmovupd 704(%rdi),%ymm12      # x6 = mem256[ input_0 + 704 ]
vmovupd 736(%rdi),%ymm13      # x7 = mem256[ input_0 + 736 ]

vpand %ymm6,%ymm0,%ymm14      # v00 = x0 & mask0
vpand %ymm10,%ymm0,%ymm15     # v10 = x4 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm6,%ymm1,%ymm6       # v01 = x0 & mask1
vpand %ymm10,%ymm1,%ymm10     # v11 = x4 & mask1
vpsrlq $4,%ymm6,%ymm6         # 4x v01 unsigned>>= 4
vpor %ymm14,%ymm15,%ymm14     # x0 = v00 | v10
vpor %ymm6,%ymm10,%ymm6       # x4 = v01 | v11

vpand %ymm7,%ymm0,%ymm10      # v00 = x1 & mask0
vpand %ymm11,%ymm0,%ymm15     # v10 = x5 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm7,%ymm1,%ymm7       # v01 = x1 & mask1
vpand %ymm11,%ymm1,%ymm11     # v11 = x5 & mask1
vpsrlq $4,%ymm7,%ymm7         # 4x v01 unsigned>>= 4
vpor %ymm10,%ymm15,%ymm10     # x1 = v00 | v10
vpor %ymm7,%ymm11,%ymm7       # x5 = v01 | v11

vpand %ymm8,%ymm0,%ymm11      # v00 = x2 & mask0
vpand %ymm12,%ymm0,%ymm15     # v10 = x6 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm8,%ymm1,%ymm8       # v01 = x2 & mask1
vpand %ymm12,%ymm1,%ymm12     # v11 = x6 & mask1
vpsrlq $4,%ymm8,%ymm8         # 4x v01 unsigned>>= 4
vpor %ymm11,%ymm15,%ymm11     # x2 = v00 | v10
vpor %ymm8,%ymm12,%ymm8       # x6 = v01 | v11

vpand %ymm9,%ymm0,%ymm12      # v00 = x3 & mask0
vpand %ymm13,%ymm0,%ymm15     # v10 = x7 & mask0
vpsllq $4,%ymm15,%ymm15       # 4x v10 <<= 4
vpand %ymm9,%ymm1,%ymm9       # v01 = x3 & mask1
vpand %ymm13,%ymm1,%ymm13     # v11 = x7 & mask1
vpsrlq $4,%ymm9,%ymm9         # 4x v01 unsigned>>= 4
vpor %ymm12,%ymm15,%ymm12     # x3 = v00 | v10
vpor %ymm9,%ymm13,%ymm9       # x7 = v01 | v11

vpand %ymm14,%ymm2,%ymm13     # v00 = x0 & mask2
vpand %ymm11,%ymm2,%ymm15     # v10 = x2 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm14,%ymm3,%ymm14     # v01 = x0 & mask3
vpand %ymm11,%ymm3,%ymm11     # v11 = x2 & mask3
vpsrlq $2,%ymm14,%ymm14       # 4x v01 unsigned>>= 2
vpor %ymm13,%ymm15,%ymm13     # x0 = v00 | v10
vpor %ymm14,%ymm11,%ymm11     # x2 = v01 | v11

vpand %ymm10,%ymm2,%ymm14     # v00 = x1 & mask2
vpand %ymm12,%ymm2,%ymm15     # v10 = x3 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm10,%ymm3,%ymm10     # v01 = x1 & mask3
vpand %ymm12,%ymm3,%ymm12     # v11 = x3 & mask3
vpsrlq $2,%ymm10,%ymm10       # 4x v01 unsigned>>= 2
vpor %ymm14,%ymm15,%ymm14     # x1 = v00 | v10
vpor %ymm10,%ymm12,%ymm10     # x3 = v01 | v11

vpand %ymm6,%ymm2,%ymm12      # v00 = x4 & mask2
vpand %ymm8,%ymm2,%ymm15      # v10 = x6 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm6,%ymm3,%ymm6       # v01 = x4 & mask3
vpand %ymm8,%ymm3,%ymm8       # v11 = x6 & mask3
vpsrlq $2,%ymm6,%ymm6         # 4x v01 unsigned>>= 2
vpor %ymm12,%ymm15,%ymm12     # x4 = v00 | v10
vpor %ymm6,%ymm8,%ymm6        # x6 = v01 | v11

vpand %ymm7,%ymm2,%ymm8       # v00 = x5 & mask2
vpand %ymm9,%ymm2,%ymm15      # v10 = x7 & mask2
vpsllq $2,%ymm15,%ymm15       # 4x v10 <<= 2
vpand %ymm7,%ymm3,%ymm7       # v01 = x5 & mask3
vpand %ymm9,%ymm3,%ymm9       # v11 = x7 & mask3
vpsrlq $2,%ymm7,%ymm7         # 4x v01 unsigned>>= 2
vpor %ymm8,%ymm15,%ymm8       # x5 = v00 | v10
vpor %ymm7,%ymm9,%ymm7        # x7 = v01 | v11

vpand %ymm13,%ymm4,%ymm9      # v00 = x0 & mask4
vpand %ymm14,%ymm4,%ymm15     # v10 = x1 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm13,%ymm5,%ymm13     # v01 = x0 & mask5
vpand %ymm14,%ymm5,%ymm14     # v11 = x1 & mask5
vpsrlq $1,%ymm13,%ymm13       # 4x v01 unsigned>>= 1
vpor %ymm9,%ymm15,%ymm9       # x0 = v00 | v10
vpor %ymm13,%ymm14,%ymm13     # x1 = v01 | v11

vpand %ymm11,%ymm4,%ymm14     # v00 = x2 & mask4
vpand %ymm10,%ymm4,%ymm15     # v10 = x3 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm11,%ymm5,%ymm11     # v01 = x2 & mask5
vpand %ymm10,%ymm5,%ymm10     # v11 = x3 & mask5
vpsrlq $1,%ymm11,%ymm11       # 4x v01 unsigned>>= 1
vpor %ymm14,%ymm15,%ymm14     # x2 = v00 | v10
vpor %ymm11,%ymm10,%ymm10     # x3 = v01 | v11

vpand %ymm12,%ymm4,%ymm11     # v00 = x4 & mask4
vpand %ymm8,%ymm4,%ymm15      # v10 = x5 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm12,%ymm5,%ymm12     # v01 = x4 & mask5
vpand %ymm8,%ymm5,%ymm8       # v11 = x5 & mask5
vpsrlq $1,%ymm12,%ymm12       # 4x v01 unsigned>>= 1
vpor %ymm11,%ymm15,%ymm11     # x4 = v00 | v10
vpor %ymm12,%ymm8,%ymm8       # x5 = v01 | v11

vpand %ymm6,%ymm4,%ymm12      # v00 = x6 & mask4
vpand %ymm7,%ymm4,%ymm15      # v10 = x7 & mask4
vpsllq $1,%ymm15,%ymm15       # 4x v10 <<= 1
vpand %ymm6,%ymm5,%ymm6       # v01 = x6 & mask5
vpand %ymm7,%ymm5,%ymm7       # v11 = x7 & mask5
vpsrlq $1,%ymm6,%ymm6         # 4x v01 unsigned>>= 1
vpor %ymm12,%ymm15,%ymm12     # x6 = v00 | v10
vpor %ymm6,%ymm7,%ymm6        # x7 = v01 | v11

vmovupd %ymm9,512(%rdi)       # mem256[ input_0 + 512 ] = x0
vmovupd %ymm13,544(%rdi)      # mem256[ input_0 + 544 ] = x1
vmovupd %ymm14,576(%rdi)      # mem256[ input_0 + 576 ] = x2
vmovupd %ymm10,608(%rdi)      # mem256[ input_0 + 608 ] = x3
vmovupd %ymm11,640(%rdi)      # mem256[ input_0 + 640 ] = x4
vmovupd %ymm8,672(%rdi)       # mem256[ input_0 + 672 ] = x5
vmovupd %ymm12,704(%rdi)      # mem256[ input_0 + 704 ] = x6
vmovupd %ymm6,736(%rdi)       # mem256[ input_0 + 736 ] = x7
# qhasm: x0 = mem256[ input_0 + 768 ]
vmovupd 768(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 800 ]
vmovupd 800(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 832 ]
vmovupd 832(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 864 ]
vmovupd 864(%rdi),%ymm9
# qhasm: x4 = mem256[ input_0 + 896 ]
vmovupd 896(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 928 ]
vmovupd 928(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 960 ]
vmovupd 960(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 992 ]
vmovupd 992(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14
# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6
# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10
# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7
# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8
# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12
# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9
# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13
# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14
# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14
# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10
# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12
# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6
# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8
# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7
# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9
# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14
# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm11,%ymm11
# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11
# qhasm: v10 = x5 & mask4
vpand %ymm8,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x4 & mask5
vpand %ymm12,%ymm5,%ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm12,%ymm12
# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12
# qhasm: v10 = x7 & mask4
vpand %ymm7,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x6 & mask5
vpand %ymm6,%ymm5,%ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm6,%ymm6
# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6
# qhasm: mem256[ input_0 + 768 ] = x0
vmovupd %ymm9,768(%rdi)
# qhasm: mem256[ input_0 + 800 ] = x1
vmovupd %ymm13,800(%rdi)
# qhasm: mem256[ input_0 + 832 ] = x2
vmovupd %ymm14,832(%rdi)
# qhasm: mem256[ input_0 + 864 ] = x3
vmovupd %ymm10,864(%rdi)
# qhasm: mem256[ input_0 + 896 ] = x4
vmovupd %ymm11,896(%rdi)
# qhasm: mem256[ input_0 + 928 ] = x5
vmovupd %ymm8,928(%rdi)
# qhasm: mem256[ input_0 + 960 ] = x6
vmovupd %ymm12,960(%rdi)
# qhasm: mem256[ input_0 + 992 ] = x7
vmovupd %ymm6,992(%rdi)
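#
# Note (added comment): the same mask0..mask5 network now repeats for
# each following 256-byte block; only the load/store offsets change.
#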
# qhasm: x0 = mem256[ input_0 + 1024 ]
vmovupd 1024(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 1056 ]
vmovupd 1056(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 1088 ]
vmovupd 1088(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 1120 ]
vmovupd 1120(%rdi),%ymm9
# qhasm: x4 = mem256[ input_0 + 1152 ]
vmovupd 1152(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 1184 ]
vmovupd 1184(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 1216 ]
vmovupd 1216(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 1248 ]
vmovupd 1248(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14
# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6
# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10
# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7
# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8
# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12
# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9
# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13
# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14
# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14
# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10
# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12
# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6
# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8
# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7
# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9
# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14
# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm11,%ymm11
# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11
# qhasm: v10 = x5 & mask4
vpand %ymm8,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x4 & mask5
vpand %ymm12,%ymm5,%ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm12,%ymm12
# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12
# qhasm: v10 = x7 & mask4
vpand %ymm7,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x6 & mask5
vpand %ymm6,%ymm5,%ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm6,%ymm6
# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6
# qhasm: mem256[ input_0 + 1024 ] = x0
vmovupd %ymm9,1024(%rdi)
# qhasm: mem256[ input_0 + 1056 ] = x1
vmovupd %ymm13,1056(%rdi)
# qhasm: mem256[ input_0 + 1088 ] = x2
vmovupd %ymm14,1088(%rdi)
# qhasm: mem256[ input_0 + 1120 ] = x3
vmovupd %ymm10,1120(%rdi)
# qhasm: mem256[ input_0 + 1152 ] = x4
vmovupd %ymm11,1152(%rdi)
# qhasm: mem256[ input_0 + 1184 ] = x5
vmovupd %ymm8,1184(%rdi)
# qhasm: mem256[ input_0 + 1216 ] = x6
vmovupd %ymm12,1216(%rdi)
# qhasm: mem256[ input_0 + 1248 ] = x7
vmovupd %ymm6,1248(%rdi)
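#
# Note (added comment): the mask registers ymm0..ymm5 are loaded before
# this point.  Judging from the shift counts alone, mask0/mask1
# presumably select the low/high nibble of each byte, mask2/mask3 the
# even/odd 2-bit pairs, and mask4/mask5 the even/odd bits, in the style
# of the MASKn_0/MASKn_1 constant pairs defined elsewhere in this
# package; this is an inference from the code, not taken from this file.
#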
# qhasm: x0 = mem256[ input_0 + 1280 ]
vmovupd 1280(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 1312 ]
vmovupd 1312(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 1344 ]
vmovupd 1344(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 1376 ]
vmovupd 1376(%rdi),%ymm9
# qhasm: x4 = mem256[ input_0 + 1408 ]
vmovupd 1408(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 1440 ]
vmovupd 1440(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 1472 ]
vmovupd 1472(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 1504 ]
vmovupd 1504(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14
# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6
# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10
# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7
# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8
# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12
# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9
# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13
# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14
# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14
# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10
# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12
# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6
# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8
# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7
# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9
# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14
# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm11,%ymm11
# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11
# qhasm: v10 = x5 & mask4
vpand %ymm8,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x4 & mask5
vpand %ymm12,%ymm5,%ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm12,%ymm12
# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12
# qhasm: v10 = x7 & mask4
vpand %ymm7,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x6 & mask5
vpand %ymm6,%ymm5,%ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm6,%ymm6
# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6
# qhasm: mem256[ input_0 + 1280 ] = x0
vmovupd %ymm9,1280(%rdi)
# qhasm: mem256[ input_0 + 1312 ] = x1
vmovupd %ymm13,1312(%rdi)
# qhasm: mem256[ input_0 + 1344 ] = x2
vmovupd %ymm14,1344(%rdi)
# qhasm: mem256[ input_0 + 1376 ] = x3
vmovupd %ymm10,1376(%rdi)
# qhasm: mem256[ input_0 + 1408 ] = x4
vmovupd %ymm11,1408(%rdi)
# qhasm: mem256[ input_0 + 1440 ] = x5
vmovupd %ymm8,1440(%rdi)
# qhasm: mem256[ input_0 + 1472 ] = x6
vmovupd %ymm12,1472(%rdi)
# qhasm: mem256[ input_0 + 1504 ] = x7
vmovupd %ymm6,1504(%rdi)
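#
# Note (added comment): taken together, the distance-4, distance-2 and
# distance-1 exchanges amount to transposing each 8x8 bit tile spread
# across x0..x7, i.e. the fine-grained half of a larger bit-matrix
# transpose; any coarser strides would have to be handled by other
# code.
#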
# qhasm: x0 = mem256[ input_0 + 1536 ]
vmovupd 1536(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 1568 ]
vmovupd 1568(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 1600 ]
vmovupd 1600(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 1632 ]
vmovupd 1632(%rdi),%ymm9
# qhasm: x4 = mem256[ input_0 + 1664 ]
vmovupd 1664(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 1696 ]
vmovupd 1696(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 1728 ]
vmovupd 1728(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 1760 ]
vmovupd 1760(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14
# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6
# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10
# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7
# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8
# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12
# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9
# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13
# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14
# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14
# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10
# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12
# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6
# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8
# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7
# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9
# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14
# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2:
vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1536 ] = x0 # asm 1: vmovupd <x0=reg256#10,1536(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1536(<input_0=%rdi) vmovupd % ymm9, 1536( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x1 # asm 1: vmovupd <x1=reg256#14,1568(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1568(<input_0=%rdi) vmovupd % ymm13, 1568( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x2 # asm 1: vmovupd <x2=reg256#15,1600(<input_0=int64#1) # asm 2: vmovupd 
<x2=%ymm14,1600(<input_0=%rdi) vmovupd % ymm14, 1600( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x3 # asm 1: vmovupd <x3=reg256#11,1632(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1632(<input_0=%rdi) vmovupd % ymm10, 1632( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x4 # asm 1: vmovupd <x4=reg256#12,1664(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1664(<input_0=%rdi) vmovupd % ymm11, 1664( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x5 # asm 1: vmovupd <x5=reg256#9,1696(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1696(<input_0=%rdi) vmovupd % ymm8, 1696( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6 # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi) vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1760 ] = x7 # asm 1: vmovupd <x7=reg256#7,1760(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1760(<input_0=%rdi) vmovupd % ymm6, 1760( % rdi) # qhasm: x0 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1792(<input_0=%rdi),>x0=%ymm6 vmovupd 1792( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1824(<input_0=%rdi),>x1=%ymm7 vmovupd 1824( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1856(<input_0=%rdi),>x2=%ymm8 vmovupd 1856( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1888(<input_0=%rdi),>x3=%ymm9 vmovupd 1888( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1920(<input_0=%rdi),>x4=%ymm10 vmovupd 1920( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1952(<input_0=%rdi),>x5=%ymm11 vmovupd 1952( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1984 ] # asm 1: vmovupd 1984(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1984(<input_0=%rdi),>x6=%ymm12 vmovupd 1984( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 2016 ] # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13 vmovupd 2016( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & 
mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#1 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm0 vpand % ymm13, % ymm0, % ymm0 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#1,<v10=reg256#1 # asm 2: vpsllq $4,<v10=%ymm0,<v10=%ymm0 vpsllq $4, % ymm0, % ymm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#1,>x3=reg256#1 # asm 2: vpor 
<v00=%ymm12,<v10=%ymm0,>x3=%ymm0 vpor % ymm12, % ymm0, % ymm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1 vpor % ymm9, % ymm1, % ymm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9 vpand % ymm14, % ymm2, % ymm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#13 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm12 vpand % ymm11, % ymm2, % ymm12 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#13,<v10=reg256#13 # asm 2: vpsllq $2,<v10=%ymm12,<v10=%ymm12 vpsllq $2, % ymm12, % ymm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#14 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm13 vpand % ymm14, % ymm3, % ymm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $2,<v01=%ymm13,<v01=%ymm13 vpsrlq $2, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9 vpor % ymm9, % ymm12, % ymm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11 vpor % ymm13, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12 vpand % ymm10, % ymm2, % ymm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#1,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x3=%ymm0,<mask2=%ymm2,>v10=%ymm13 vpand % ymm0, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12 vpor % ymm12, % ymm13, % ymm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0 vpor % ymm10, % ymm0, % ymm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10 vpand % ymm6, % ymm2, % ymm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm13 vpand % ymm8, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % 
ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10 vpor % ymm10, % ymm13, % ymm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#2,<mask2=reg256#3,>v10=reg256#3 # asm 2: vpand <x7=%ymm1,<mask2=%ymm2,>v10=%ymm2 vpand % ymm1, % ymm2, % ymm2 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#3,<v10=reg256#3 # asm 2: vpsllq $2,<v10=%ymm2,<v10=%ymm2 vpsllq $2, % ymm2, % ymm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#3,>x5=reg256#3 # asm 2: vpor <v00=%ymm8,<v10=%ymm2,>x5=%ymm2 vpor % ymm8, % ymm2, % ymm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1 vpor % ymm7, % ymm1, % ymm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4 # asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3 vpand % ymm9, % ymm4, % ymm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#13,<mask4=reg256#5,>v10=reg256#8 # asm 2: vpand <x1=%ymm12,<mask4=%ymm4,>v10=%ymm7 vpand % ymm12, % ymm4, % ymm7 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#8,<v10=reg256#8 # asm 2: vpsllq $1,<v10=%ymm7,<v10=%ymm7 vpsllq $1, % ymm7, % ymm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#10,<mask5=reg256#6,>v01=reg256#9 # asm 2: vpand <x0=%ymm9,<mask5=%ymm5,>v01=%ymm8 vpand % ymm9, % ymm5, % ymm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10 # asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9 vpand % ymm12, % ymm5, % ymm9 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $1,<v01=%ymm8,<v01=%ymm8 vpsrlq $1, % ymm8, % ymm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4 # asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3 vpor % ymm3, % ymm7, % ymm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8 # asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7 vpor % ymm8, % ymm9, % ymm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8 vpand % ymm11, % ymm4, % ymm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#1,<mask4=reg256#5,>v10=reg256#10 # asm 2: vpand <x3=%ymm0,<mask4=%ymm4,>v10=%ymm9 vpand % ymm0, % ymm4, % ymm9 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#10,<v10=reg256#10 # asm 2: vpsllq $1,<v10=%ymm9,<v10=%ymm9 vpsllq $1, % ymm9, % ymm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 
vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0 vpand % ymm0, % ymm5, % ymm0 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8 vpor % ymm8, % ymm9, % ymm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0 vpor % ymm11, % ymm0, % ymm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9 vpand % ymm10, % ymm4, % ymm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#3,<mask4=reg256#5,>v10=reg256#12 # asm 2: vpand <x5=%ymm2,<mask4=%ymm4,>v10=%ymm11 vpand % ymm2, % ymm4, % ymm11 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#12,<v10=reg256#12 # asm 2: vpsllq $1,<v10=%ymm11,<v10=%ymm11 vpsllq $1, % ymm11, % ymm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#11,<mask5=reg256#6,>v01=reg256#11 # asm 2: vpand <x4=%ymm10,<mask5=%ymm5,>v01=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3 # asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2 vpand % ymm2, % ymm5, % ymm2 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $1,<v01=%ymm10,<v01=%ymm10 vpsrlq $1, % ymm10, % ymm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9 vpor % ymm9, % ymm11, % ymm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3 # asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2 vpor % ymm10, % ymm2, % ymm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#11 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm10 vpand % ymm6, % ymm4, % ymm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#2,<mask4=reg256#5,>v10=reg256#5 # asm 2: vpand <x7=%ymm1,<mask4=%ymm4,>v10=%ymm4 vpand % ymm1, % ymm4, % ymm4 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#5,<v10=reg256#5 # asm 2: vpsllq $1,<v10=%ymm4,<v10=%ymm4 vpsllq $1, % ymm4, % ymm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1 vpand % ymm1, % ymm5, % ymm1 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#5,>x6=reg256#5 # asm 2: vpor <v00=%ymm10,<v10=%ymm4,>x6=%ymm4 vpor % ymm10, % ymm4, % ymm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1 vpor % ymm6, % ymm1, % ymm1 # qhasm: mem256[ input_0 + 1792 ] = x0 # asm 1: vmovupd <x0=reg256#4,1792(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm3,1792(<input_0=%rdi) vmovupd % ymm3, 1792( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x1 # asm 1: vmovupd <x1=reg256#8,1824(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm7,1824(<input_0=%rdi) vmovupd % ymm7, 1824( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x2 # asm 1: vmovupd 
<x2=reg256#9,1856(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm8,1856(<input_0=%rdi) vmovupd % ymm8, 1856( % rdi) # qhasm: mem256[ input_0 + 1888 ] = x3 # asm 1: vmovupd <x3=reg256#1,1888(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm0,1888(<input_0=%rdi) vmovupd % ymm0, 1888( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x4 # asm 1: vmovupd <x4=reg256#10,1920(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm9,1920(<input_0=%rdi) vmovupd % ymm9, 1920( % rdi) # qhasm: mem256[ input_0 + 1952 ] = x5 # asm 1: vmovupd <x5=reg256#3,1952(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm2,1952(<input_0=%rdi) vmovupd % ymm2, 1952( % rdi) # qhasm: mem256[ input_0 + 1984 ] = x6 # asm 1: vmovupd <x6=reg256#5,1984(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm4,1984(<input_0=%rdi) vmovupd % ymm4, 1984( % rdi) # qhasm: mem256[ input_0 + 2016 ] = x7 # asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi) vmovupd % ymm1, 2016( % rdi) # qhasm: return add % r11, % rsp ret
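The routine that returns above is built almost entirely from one idiom: take two bit-rows, keep complementary bit groups with a mask pair (mask0/mask1, mask2/mask3, mask4/mask5), shift one group left by 4, 2, or 1 while the mirror group shifts right by the same amount, and OR the halves back together. Applied to the row pairs (x0,x4)(x1,x5)(x2,x6)(x3,x7), then (x0,x2)(x1,x3)(x4,x6)(x5,x7), then adjacent pairs, this is the standard radix-2 network that transposes each aligned 8x8 bit block. Below is a minimal C sketch of the same butterfly, assuming plain uint64_t lanes in place of the 256-bit vectors; the interleave constants are the usual masks for shifts 4/2/1, and the function and test names are illustrative, not taken from the dumped sources.

#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>

/* Transpose every aligned 8x8 bit block, viewing x[0..7] as eight
   64-bit rows (eight 8x8 blocks processed in parallel). Each stage
   mirrors the vpand/vpsllq/vpsrlq/vpor pattern in the assembly above. */
static void transpose8x8_bits(uint64_t x[8])
{
    static const uint64_t lo_mask[3] = {
        0x0F0F0F0F0F0F0F0FULL,  /* stage with shift 4 */
        0x3333333333333333ULL,  /* stage with shift 2 */
        0x5555555555555555ULL,  /* stage with shift 1 */
    };
    int s = 0;
    for (int d = 4; d >= 1; d >>= 1, s++) {
        for (int i = 0; i < 8; i++) {
            if (i & d) continue;               /* process row pairs (i, i+d) */
            int j = i + d;
            uint64_t m  = lo_mask[s];          /* ~m is the matching high mask */
            uint64_t lo = (x[i] & m) | ((x[j] & m) << d);
            uint64_t hi = ((x[i] & ~m) >> d) | (x[j] & ~m);
            x[i] = lo;
            x[j] = hi;
        }
    }
}

int main(void)
{
    uint64_t x[8], y[8];
    for (int i = 0; i < 8; i++)
        x[i] = 0x0123456789ABCDEFULL * (uint64_t)(2 * i + 1);  /* arbitrary pattern */
    memcpy(y, x, sizeof x);
    transpose8x8_bits(x);
    transpose8x8_bits(x);  /* a transpose is an involution */
    assert(memcmp(x, y, sizeof x) == 0);
    puts("transpose round-trip ok");
    return 0;
}

The self-test exploits the involution property rather than hand-computed expected values: transposing twice must reproduce the input bit-for-bit.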
mktmansour/MKT-KSA-Geolocation-Security
76,935
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896/avx2/vec256_ama_asm.S
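The record below is again qhasm-generated AVX2 code, this time for bitsliced GF(2^13) arithmetic. The qhasm variables a0..a12 hold one coefficient bit per vector slot; each a_i is first XORed with the matching vector at input_1 and written back (the leading add of the add-multiply-add that the "ama" name suggests), then the 25 partial products r0..r24 are formed with vpand/vpxor schoolbook multiplication against the 13 coefficient vectors at input_2, and each high term is folded down as soon as it is complete (the pattern r24 -> r15, r14, r12, r11 visible in the body), which corresponds to reduction modulo f(x) = x^13 + x^4 + x^3 + x + 1 and keeps the working set inside the sixteen ymm registers. A compact C sketch of that multiply-reduce core, assuming plain uint64_t lanes; the function name and the self-test are illustrative, not taken from the dump.

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

#define GFBITS 13

/* Bitsliced GF(2^13) multiply: each uint64_t lane carries 64
   independent field elements, one coefficient bit per array slot.
   AND is the GF(2) coefficient multiply, XOR the GF(2) add, and the
   fold loop applies x^(13+k) = x^(k+4) + x^(k+3) + x^(k+1) + x^k. */
static void gf13_bitsliced_mul(uint64_t out[GFBITS],
                               const uint64_t a[GFBITS],
                               const uint64_t b[GFBITS])
{
    uint64_t r[2 * GFBITS - 1] = {0};

    for (int i = 0; i < GFBITS; i++)           /* schoolbook product */
        for (int j = 0; j < GFBITS; j++)
            r[i + j] ^= a[i] & b[j];

    for (int i = 2 * GFBITS - 2; i >= GFBITS; i--) {   /* reduction */
        r[i - GFBITS + 4] ^= r[i];
        r[i - GFBITS + 3] ^= r[i];
        r[i - GFBITS + 1] ^= r[i];
        r[i - GFBITS]     ^= r[i];
    }

    for (int i = 0; i < GFBITS; i++)
        out[i] = r[i];
}

int main(void)
{
    uint64_t one[GFBITS]  = { ~0ULL };         /* element 1 in all 64 slots */
    uint64_t xelt[GFBITS] = { 0, ~0ULL };      /* element x in all 64 slots */
    uint64_t out[GFBITS];
    gf13_bitsliced_mul(out, one, xelt);        /* 1 * x == x in every slot */
    for (int i = 0; i < GFBITS; i++)
        assert(out[i] == (i == 1 ? ~0ULL : 0));
    puts("gf13 mul ok");
    return 0;
}

The assembly interleaves the reduction with the partial-product accumulation instead of staging all 25 vectors; the sketch separates the two phases only for readability, and the algebra is identical.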
#include "namespace.h" #define vec256_ama_asm CRYPTO_NAMESPACE(vec256_ama_asm) #define _vec256_ama_asm _CRYPTO_NAMESPACE(vec256_ama_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_ama_asm .p2align 5 .global _vec256_ama_asm .global vec256_ama_asm _vec256_ama_asm: vec256_ama_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>a12=reg256#2 # asm 2: vmovupd 384(<input_0=%rdi),>a12=%ymm1 vmovupd 384( % rdi), % ymm1 # qhasm: a12 = a12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<a12=reg256#2,>a12=reg256#2 # asm 2: vpxor 384(<input_1=%rsi),<a12=%ymm1,>a12=%ymm1 vpxor 384( % rsi), % ymm1, % ymm1 # qhasm: mem256[ input_0 + 384 ] = a12 # asm 1: vmovupd <a12=reg256#2,384(<input_0=int64#1) # asm 2: vmovupd <a12=%ymm1,384(<input_0=%rdi) vmovupd % ymm1, 384( % rdi) # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # 
asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 = a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>a11=reg256#15 # asm 2: vmovupd 352(<input_0=%rdi),>a11=%ymm14 vmovupd 352( % rdi), % ymm14 # qhasm: a11 = a11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<a11=reg256#15,>a11=reg256#15 # asm 2: vpxor 352(<input_1=%rsi),<a11=%ymm14,>a11=%ymm14 vpxor 352( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 352 ] = a11 # asm 1: vmovupd <a11=reg256#15,352(<input_0=int64#1) # asm 2: vmovupd <a11=%ymm14,352(<input_0=%rdi) vmovupd % ymm14, 352( % rdi) # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % 
ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>a10=reg256#15 # asm 2: vmovupd 320(<input_0=%rdi),>a10=%ymm14 vmovupd 320( % rdi), % ymm14 # qhasm: a10 = a10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<a10=reg256#15,>a10=reg256#15 # asm 2: vpxor 320(<input_1=%rsi),<a10=%ymm14,>a10=%ymm14 vpxor 320( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 320 ] = a10 # asm 1: vmovupd <a10=reg256#15,320(<input_0=int64#1) # asm 2: vmovupd <a10=%ymm14,320(<input_0=%rdi) vmovupd % ymm14, 320( % rdi) # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>a9=reg256#15 # asm 2: vmovupd 288(<input_0=%rdi),>a9=%ymm14 vmovupd 288( % rdi), % ymm14 # qhasm: a9 = a9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<a9=reg256#15,>a9=reg256#15 # asm 2: vpxor 288(<input_1=%rsi),<a9=%ymm14,>a9=%ymm14 vpxor 288( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 288 ] = a9 # asm 1: vmovupd <a9=reg256#15,288(<input_0=int64#1) # asm 2: vmovupd <a9=%ymm14,288(<input_0=%rdi) vmovupd % ymm14, 288( % rdi) # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & 
mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>a8=reg256#15 # asm 2: vmovupd 256(<input_0=%rdi),>a8=%ymm14 vmovupd 256( % rdi), % ymm14 # qhasm: a8 = a8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<a8=reg256#15,>a8=reg256#15 # asm 2: vpxor 256(<input_1=%rsi),<a8=%ymm14,>a8=%ymm14 vpxor 256( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 256 ] = a8 # asm 1: vmovupd <a8=reg256#15,256(<input_0=int64#1) # asm 2: vmovupd <a8=%ymm14,256(<input_0=%rdi) vmovupd % ymm14, 256( % rdi) # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 
192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>a7=reg256#15 # asm 2: vmovupd 224(<input_0=%rdi),>a7=%ymm14 vmovupd 224( % rdi), % ymm14 # qhasm: a7 = a7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 224(<input_1=int64#2),<a7=reg256#15,>a7=reg256#15 # asm 2: vpxor 224(<input_1=%rsi),<a7=%ymm14,>a7=%ymm14 vpxor 224( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 224 ] = a7 # asm 1: vmovupd <a7=reg256#15,224(<input_0=int64#1) # asm 2: vmovupd <a7=%ymm14,224(<input_0=%rdi) vmovupd % ymm14, 224( % rdi) # qhasm: r = a7 & b0 # asm 1: vpand 
<a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % 
rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>a6=reg256#15 # asm 2: vmovupd 192(<input_0=%rdi),>a6=%ymm14 vmovupd 192( % rdi), % ymm14 # qhasm: a6 = a6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<a6=reg256#15,>a6=reg256#15 # asm 2: vpxor 192(<input_1=%rsi),<a6=%ymm14,>a6=%ymm14 vpxor 192( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 192 ] = a6 # asm 1: vmovupd <a6=reg256#15,192(<input_0=int64#1) # asm 2: vmovupd <a6=%ymm14,192(<input_0=%rdi) vmovupd % ymm14, 192( % rdi) # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: 
vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_0 + 160 ] # asm 1: vmovupd 
160(<input_0=int64#1),>a5=reg256#15 # asm 2: vmovupd 160(<input_0=%rdi),>a5=%ymm14 vmovupd 160( % rdi), % ymm14 # qhasm: a5 = a5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<a5=reg256#15,>a5=reg256#15 # asm 2: vpxor 160(<input_1=%rsi),<a5=%ymm14,>a5=%ymm14 vpxor 160( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 160 ] = a5 # asm 1: vmovupd <a5=reg256#15,160(<input_0=int64#1) # asm 2: vmovupd <a5=%ymm14,160(<input_0=%rdi) vmovupd % ymm14, 160( % rdi) # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = 
a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>a4=reg256#15 # asm 2: vmovupd 128(<input_0=%rdi),>a4=%ymm14 vmovupd 128( % rdi), % ymm14 # qhasm: a4 = a4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<a4=reg256#15,>a4=reg256#15 # asm 2: vpxor 128(<input_1=%rsi),<a4=%ymm14,>a4=%ymm14 vpxor 128( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 128 ] = a4 # asm 1: vmovupd <a4=reg256#15,128(<input_0=int64#1) # asm 2: vmovupd <a4=%ymm14,128(<input_0=%rdi) vmovupd % ymm14, 128( % rdi) # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % 
ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>a3=reg256#15 # asm 2: vmovupd 96(<input_0=%rdi),>a3=%ymm14 vmovupd 96( % rdi), % ymm14 # qhasm: a3 = a3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<a3=reg256#15,>a3=reg256#15 # asm 2: vpxor 96(<input_1=%rsi),<a3=%ymm14,>a3=%ymm14 vpxor 96( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 96 ] = a3 # asm 1: vmovupd <a3=reg256#15,96(<input_0=int64#1) # asm 2: vmovupd <a3=%ymm14,96(<input_0=%rdi) vmovupd % ymm14, 96( % rdi) # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor 
<r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>a2=reg256#15 # asm 2: vmovupd 64(<input_0=%rdi),>a2=%ymm14 vmovupd 64( % rdi), % ymm14 # qhasm: a2 = a2 ^ mem256[ input_1 + 64 ] # asm 1: vpxor 64(<input_1=int64#2),<a2=reg256#15,>a2=reg256#15 # asm 2: vpxor 64(<input_1=%rsi),<a2=%ymm14,>a2=%ymm14 vpxor 64( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 64 ] = a2 # asm 1: vmovupd <a2=reg256#15,64(<input_0=int64#1) # asm 2: vmovupd <a2=%ymm14,64(<input_0=%rdi) vmovupd % ymm14, 64( % rdi) # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 
vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 
384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>a1=reg256#15 # asm 2: vmovupd 32(<input_0=%rdi),>a1=%ymm14 vmovupd 32( % rdi), % ymm14 # qhasm: a1 = a1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<a1=reg256#15,>a1=reg256#15 # asm 2: vpxor 32(<input_1=%rsi),<a1=%ymm14,>a1=%ymm14 vpxor 32( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 32 ] = a1 # asm 1: vmovupd <a1=reg256#15,32(<input_0=int64#1) # asm 2: vmovupd <a1=%ymm14,32(<input_0=%rdi) vmovupd % ymm14, 32( % rdi) # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), 
% ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>a0=reg256#15 # asm 2: vmovupd 0(<input_0=%rdi),>a0=%ymm14 vmovupd 0( % rdi), % ymm14 # qhasm: a0 = a0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<a0=reg256#15,>a0=reg256#15 # asm 2: vpxor 0(<input_1=%rsi),<a0=%ymm14,>a0=%ymm14 vpxor 0( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 0 ] = a0 # asm 1: vmovupd <a0=reg256#15,0(<input_0=int64#1) # asm 2: vmovupd <a0=%ymm14,0(<input_0=%rdi) vmovupd % ymm14, 0( % rdi) # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # 
asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: 
vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: r12 = r12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#3,>r12=reg256#1 # asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm2,>r12=%ymm0 vpxor 384( % rsi), % ymm2, % ymm0 # qhasm: mem256[ input_1 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2) # asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi) vmovupd % ymm0, 384( % rsi) # qhasm: r11 = r11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#2,>r11=reg256#1 # asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm1,>r11=%ymm0 vpxor 352( % rsi), % ymm1, % ymm0 # qhasm: mem256[ input_1 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2) # asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi) vmovupd % ymm0, 352( % rsi) # qhasm: r10 = r10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#14,>r10=reg256#1 # asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm13,>r10=%ymm0 vpxor 320( % rsi), % ymm13, % ymm0 # qhasm: mem256[ input_1 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2) # asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi) vmovupd % ymm0, 320( % rsi) # qhasm: r9 = r9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#13,>r9=reg256#1 # asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm12,>r9=%ymm0 vpxor 288( % rsi), % ymm12, % ymm0 # qhasm: mem256[ input_1 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2) # asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi) vmovupd % ymm0, 288( % rsi) # qhasm: r8 = r8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#12,>r8=reg256#1 # asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm11,>r8=%ymm0 vpxor 256( % rsi), % ymm11, % ymm0 # qhasm: mem256[ input_1 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2) # asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi) vmovupd % ymm0, 256( % rsi) # qhasm: r7 = r7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 224(<input_1=int64#2),<r7=reg256#11,>r7=reg256#1 # asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm10,>r7=%ymm0 vpxor 224( % rsi), % ymm10, % ymm0 # qhasm: mem256[ input_1 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2) # asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi) vmovupd % ymm0, 224( % rsi) # qhasm: r6 = r6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#10,>r6=reg256#1 # asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm9,>r6=%ymm0 vpxor 192( % rsi), % ymm9, % ymm0 # qhasm: mem256[ input_1 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2) # asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi) vmovupd % ymm0, 192( % rsi) # qhasm: r5 = r5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#9,>r5=reg256#1 # asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm8,>r5=%ymm0 vpxor 160( % rsi), % ymm8, % ymm0 # qhasm: mem256[ input_1 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2) # asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi) vmovupd % ymm0, 160( % rsi) # qhasm: r4 = r4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 
128(<input_1=int64#2),<r4=reg256#8,>r4=reg256#1
# asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm7,>r4=%ymm0
vpxor 128(%rsi),%ymm7,%ymm0

# qhasm: mem256[ input_1 + 128 ] = r4
# asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2)
# asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi)
vmovupd %ymm0,128(%rsi)

# qhasm: r3 = r3 ^ mem256[ input_1 + 96 ]
# asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#7,>r3=reg256#1
# asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm6,>r3=%ymm0
vpxor 96(%rsi),%ymm6,%ymm0

# qhasm: mem256[ input_1 + 96 ] = r3
# asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2)
# asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi)
vmovupd %ymm0,96(%rsi)

# qhasm: r2 = r2 ^ mem256[ input_1 + 64 ]
# asm 1: vpxor 64(<input_1=int64#2),<r2=reg256#6,>r2=reg256#1
# asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm5,>r2=%ymm0
vpxor 64(%rsi),%ymm5,%ymm0

# qhasm: mem256[ input_1 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2)
# asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi)
vmovupd %ymm0,64(%rsi)

# qhasm: r1 = r1 ^ mem256[ input_1 + 32 ]
# asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#5,>r1=reg256#1
# asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm4,>r1=%ymm0
vpxor 32(%rsi),%ymm4,%ymm0

# qhasm: mem256[ input_1 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2)
# asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi)
vmovupd %ymm0,32(%rsi)

# qhasm: r0 = r0 ^ mem256[ input_1 + 0 ]
# asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#4,>r0=reg256#1
# asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm3,>r0=%ymm0
vpxor 0(%rsi),%ymm3,%ymm0

# qhasm: mem256[ input_1 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2)
# asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi)
vmovupd %ymm0,0(%rsi)

# qhasm: return
add %r11,%rsp
ret
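Taken together, the routine above reads as a bitsliced multiply-accumulate over GF(2^13): per 256-bit lane it forms limbs a = x ^ y (a9 down to a0 are visible here, stored back to input_0), accumulates schoolbook partial products r0..r24 against the multiplier limbs at input_2, and folds each high coefficient r(13+k) into r(4+k), r(3+k), r(1+k) and r(k), which is reduction by f(x) = x^13 + x^4 + x^3 + x + 1 (visible above as r21 feeding r12, r11, r9, r8, then r20 feeding r11, r10, r8, r7, and so on). Below is a minimal C sketch of that reduction step, assuming 64-bit lanes in place of the ymm registers; the name gf8192_reduce_bitsliced and the standalone-pass structure are illustrative, not the library's API.

#include <stdint.h>

/* Illustrative sketch (hypothetical helper, not PQClean's API): fold the
 * bitsliced product r[0..24] of two degree-12 GF(2)[x] polynomials back
 * to r[0..12], using x^(13+k) = x^(4+k) + x^(3+k) + x^(1+k) + x^k,
 * i.e. reduction modulo f(x) = x^13 + x^4 + x^3 + x + 1. Each r[i]
 * holds coefficient i of 64 independent field elements, one per bit. */
void gf8192_reduce_bitsliced(uint64_t r[25]) {
    for (int k = 11; k >= 0; k--) {
        uint64_t hi = r[13 + k]; /* coefficient x^(13+k) of every lane */
        r[4 + k] ^= hi;
        r[3 + k] ^= hi;
        r[1 + k] ^= hi;
        r[k] ^= hi;
    }
}

The assembly interleaves each fold with the surrounding accumulation (hence the plain copy r8 = r21 where a fresh limb first appears), while the sketch runs the folds as a separate top-down pass; the two orders agree because every update is an XOR and each fold only feeds strictly lower coefficients.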
mktmansour/MKT-KSA-Geolocation-Security
262,634
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896/avx2/transpose_64x64_asm.S
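# Overview (editorial annotation, not upstream code): transpose_64x64_asm
# below transposes a 64x64 bit matrix stored as 64 consecutive 64-bit rows.
# Each stage pairs registers of rows and exchanges complementary half-blocks
# between them with mask/shift/or (vpsllq $32 for 32-bit halves, then
# vpslld $16, vpsllw $8, and so on down), the classic recursive
# block-swap transpose; the MASKn_0/MASKn_1 constant pairs appear to select
# the low/high halves of every 2^(n+1)-bit block.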
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x64_asm CRYPTO_NAMESPACE(transpose_64x64_asm) #define _transpose_64x64_asm _CRYPTO_NAMESPACE(transpose_64x64_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 r0 # qhasm: reg128 r1 # qhasm: reg128 r2 # qhasm: reg128 r3 # qhasm: reg128 r4 # qhasm: reg128 r5 # qhasm: reg128 r6 # qhasm: reg128 r7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: int64 buf # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x64_asm .p2align 5 .global _transpose_64x64_asm .global transpose_64x64_asm _transpose_64x64_asm: transpose_64x64_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 64 ] x2 # asm 1: movddup 64(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 64(<input_0=%rdi),>r1=%xmm7 movddup 64( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 
128(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 128(<input_0=%rdi),>r2=%xmm8 movddup 128( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 192(<input_0=%rdi),>r3=%xmm9 movddup 192( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 256(<input_0=%rdi),>r4=%xmm10 movddup 256( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 320(<input_0=%rdi),>r5=%xmm11 movddup 320( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 384(<input_0=%rdi),>r6=%xmm12 movddup 384( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 448(<input_0=%rdi),>r7=%xmm13 movddup 448( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # 
qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 0 ] = buf # asm 1: movq <buf=int64#2,0(<input_0=int64#1) # asm 2: movq <buf=%rsi,0(<input_0=%rdi) movq % rsi, 0( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 64 ] = buf # asm 1: movq <buf=int64#2,64(<input_0=int64#1) # asm 2: movq <buf=%rsi,64(<input_0=%rdi) movq % rsi, 64( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 128 ] = buf # asm 1: movq <buf=int64#2,128(<input_0=int64#1) # asm 2: movq <buf=%rsi,128(<input_0=%rdi) movq % rsi, 128( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 192 ] = buf # asm 1: movq <buf=int64#2,192(<input_0=int64#1) # asm 2: movq <buf=%rsi,192(<input_0=%rdi) movq % rsi, 192( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 256 ] = buf # asm 1: movq <buf=int64#2,256(<input_0=int64#1) # asm 2: movq <buf=%rsi,256(<input_0=%rdi) movq % rsi, 256( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 320 ] = buf # asm 1: movq <buf=int64#2,320(<input_0=int64#1) # asm 2: movq <buf=%rsi,320(<input_0=%rdi) movq % rsi, 320( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi 
pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 384 ] = buf # asm 1: movq <buf=int64#2,384(<input_0=int64#1) # asm 2: movq <buf=%rsi,384(<input_0=%rdi) movq % rsi, 384( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 448 ] = buf # asm 1: movq <buf=int64#2,448(<input_0=int64#1) # asm 2: movq <buf=%rsi,448(<input_0=%rdi) movq % rsi, 448( % rdi) # qhasm: r0 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 8(<input_0=%rdi),>r0=%xmm6 movddup 8( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 72 ] x2 # asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7 movddup 72( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 136(<input_0=%rdi),>r2=%xmm8 movddup 136( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 200(<input_0=%rdi),>r3=%xmm9 movddup 200( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 264(<input_0=%rdi),>r4=%xmm10 movddup 264( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 328(<input_0=%rdi),>r5=%xmm11 movddup 328( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 392(<input_0=%rdi),>r6=%xmm12 movddup 392( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 456(<input_0=%rdi),>r7=%xmm13 movddup 456( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 
= v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 
vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 8 ] = buf # asm 1: movq <buf=int64#2,8(<input_0=int64#1) # asm 2: movq <buf=%rsi,8(<input_0=%rdi) movq % rsi, 8( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 72 ] = buf # asm 1: movq <buf=int64#2,72(<input_0=int64#1) # asm 2: movq <buf=%rsi,72(<input_0=%rdi) movq % rsi, 72( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 136 ] = buf # asm 1: movq <buf=int64#2,136(<input_0=int64#1) # asm 2: movq <buf=%rsi,136(<input_0=%rdi) movq % rsi, 136( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: 
mem64[ input_0 + 200 ] = buf # asm 1: movq <buf=int64#2,200(<input_0=int64#1) # asm 2: movq <buf=%rsi,200(<input_0=%rdi) movq % rsi, 200( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 264 ] = buf # asm 1: movq <buf=int64#2,264(<input_0=int64#1) # asm 2: movq <buf=%rsi,264(<input_0=%rdi) movq % rsi, 264( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 328 ] = buf # asm 1: movq <buf=int64#2,328(<input_0=int64#1) # asm 2: movq <buf=%rsi,328(<input_0=%rdi) movq % rsi, 328( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 392 ] = buf # asm 1: movq <buf=int64#2,392(<input_0=int64#1) # asm 2: movq <buf=%rsi,392(<input_0=%rdi) movq % rsi, 392( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 456 ] = buf # asm 1: movq <buf=int64#2,456(<input_0=int64#1) # asm 2: movq <buf=%rsi,456(<input_0=%rdi) movq % rsi, 456( % rdi) # qhasm: r0 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 16(<input_0=%rdi),>r0=%xmm6 movddup 16( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 80 ] x2 # asm 1: movddup 80(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 80(<input_0=%rdi),>r1=%xmm7 movddup 80( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 208(<input_0=%rdi),>r3=%xmm9 movddup 208( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 272(<input_0=%rdi),>r4=%xmm10 movddup 272( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 336(<input_0=%rdi),>r5=%xmm11 movddup 336( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 400(<input_0=%rdi),>r6=%xmm12 movddup 400( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 464(<input_0=%rdi),>r7=%xmm13 movddup 464( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor 
<v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 
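# ----------------------------------------------------------------------
# NOTE (annotation added for readability; an interpretation of the
# surrounding generated code, not part of the qhasm output): every
# block in this routine repeats one masked shift-and-OR butterfly,
# the classic step used in bit-matrix transposes.  For a stage of
# distance d, v00 selects one row's bits under mask0/mask2/mask4
# (the low d bits of each 2d-bit block, given the shifts used), v10
# is the partner row's low half shifted up by d (vpsllq/vpslld/
# vpsllw), v01 is the first row's high half shifted down, and v11
# selects the partner's high half under mask1/mask3/mask5; the vpor
# pair that follows (r0 = v00 | v10, r2 = v01 | v11 here) recombines
# them.  The stages run at d = 32, 16, 8 over eight rows loaded 64
# bytes apart (movddup), and each 64-bit result lane is written back
# with pextrq/movq before the next eight words are processed.
# ----------------------------------------------------------------------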
# qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 
2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 16 ] = buf # 
asm 1: movq <buf=int64#2,16(<input_0=int64#1) # asm 2: movq <buf=%rsi,16(<input_0=%rdi) movq % rsi, 16( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 80 ] = buf # asm 1: movq <buf=int64#2,80(<input_0=int64#1) # asm 2: movq <buf=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 144 ] = buf # asm 1: movq <buf=int64#2,144(<input_0=int64#1) # asm 2: movq <buf=%rsi,144(<input_0=%rdi) movq % rsi, 144( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 208 ] = buf # asm 1: movq <buf=int64#2,208(<input_0=int64#1) # asm 2: movq <buf=%rsi,208(<input_0=%rdi) movq % rsi, 208( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 272 ] = buf # asm 1: movq <buf=int64#2,272(<input_0=int64#1) # asm 2: movq <buf=%rsi,272(<input_0=%rdi) movq % rsi, 272( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 336 ] = buf # asm 1: movq <buf=int64#2,336(<input_0=int64#1) # asm 2: movq <buf=%rsi,336(<input_0=%rdi) movq % rsi, 336( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 400 ] = buf # asm 1: movq <buf=int64#2,400(<input_0=int64#1) # asm 2: movq <buf=%rsi,400(<input_0=%rdi) movq % rsi, 400( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 464 ] = buf # asm 1: movq <buf=int64#2,464(<input_0=int64#1) # asm 2: movq <buf=%rsi,464(<input_0=%rdi) movq % rsi, 464( % rdi) # qhasm: r0 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 24(<input_0=%rdi),>r0=%xmm6 movddup 24( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 88 ] x2 # asm 1: movddup 88(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 88(<input_0=%rdi),>r1=%xmm7 movddup 88( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 152(<input_0=%rdi),>r2=%xmm8 movddup 152( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 280(<input_0=%rdi),>r4=%xmm10 movddup 280( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 344(<input_0=%rdi),>r5=%xmm11 movddup 344( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 408(<input_0=%rdi),>r6=%xmm12 movddup 408( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 472(<input_0=%rdi),>r7=%xmm13 movddup 472( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # 
qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw 
$8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 24 ] = buf # asm 1: movq <buf=int64#2,24(<input_0=int64#1) # asm 2: movq <buf=%rsi,24(<input_0=%rdi) movq % rsi, 24( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 88 ] = buf # asm 1: movq <buf=int64#2,88(<input_0=int64#1) # asm 2: movq <buf=%rsi,88(<input_0=%rdi) movq % rsi, 88( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 152 ] = buf # asm 1: movq <buf=int64#2,152(<input_0=int64#1) # asm 2: movq <buf=%rsi,152(<input_0=%rdi) movq % rsi, 152( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 216 ] = buf # asm 1: movq <buf=int64#2,216(<input_0=int64#1) # asm 2: movq <buf=%rsi,216(<input_0=%rdi) movq % rsi, 216( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 280 ] = buf # asm 1: movq <buf=int64#2,280(<input_0=int64#1) # asm 2: movq <buf=%rsi,280(<input_0=%rdi) movq % rsi, 280( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 344 ] = buf # asm 1: movq <buf=int64#2,344(<input_0=int64#1) # asm 2: movq <buf=%rsi,344(<input_0=%rdi) movq % rsi, 344( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 408 ] = buf # asm 1: movq <buf=int64#2,408(<input_0=int64#1) # asm 2: movq <buf=%rsi,408(<input_0=%rdi) movq % rsi, 408( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 472 ] = buf # asm 1: movq <buf=int64#2,472(<input_0=int64#1) # asm 2: movq <buf=%rsi,472(<input_0=%rdi) movq % rsi, 472( % rdi) # qhasm: r0 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 32(<input_0=%rdi),>r0=%xmm6 movddup 32( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 96 ] x2 # asm 1: movddup 96(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 96(<input_0=%rdi),>r1=%xmm7 movddup 96( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 160(<input_0=%rdi),>r2=%xmm8 movddup 160( % rdi), % xmm8 # qhasm: r3 = 
mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 224(<input_0=%rdi),>r3=%xmm9 movddup 224( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 352(<input_0=%rdi),>r5=%xmm11 movddup 352( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 416(<input_0=%rdi),>r6=%xmm12 movddup 416( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 480(<input_0=%rdi),>r7=%xmm13 movddup 480( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: 
vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0, % xmm9, % rsi

# qhasm: mem64[ input_0 + 32 ] = buf
# asm 1: movq <buf=int64#2,32(<input_0=int64#1)
# asm 2: movq <buf=%rsi,32(<input_0=%rdi)
movq % rsi, 32( % rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0, % xmm13, % rsi

# qhasm: mem64[ input_0 + 96 ] = buf
# asm 1: movq <buf=int64#2,96(<input_0=int64#1)
# asm 2: movq <buf=%rsi,96(<input_0=%rdi)
movq % rsi, 96( % rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0, % xmm14, % rsi

# qhasm: mem64[ input_0 + 160 ] = buf
# asm 1: movq <buf=int64#2,160(<input_0=int64#1)
# asm 2: movq <buf=%rsi,160(<input_0=%rdi)
movq % rsi, 160( % rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0, % xmm10, % rsi

# qhasm: mem64[ input_0 + 224 ] = buf
# asm 1: movq <buf=int64#2,224(<input_0=int64#1)
# asm 2: movq <buf=%rsi,224(<input_0=%rdi)
movq % rsi, 224( % rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0, % xmm11, % rsi

# qhasm: mem64[ input_0 + 288 ] = buf
# asm 1: movq <buf=int64#2,288(<input_0=int64#1)
# asm 2: movq <buf=%rsi,288(<input_0=%rdi)
movq % rsi, 288( % rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0, % xmm8, % rsi

# qhasm: mem64[ input_0 + 352 ] = buf
# asm 1: movq <buf=int64#2,352(<input_0=int64#1)
# asm 2: movq <buf=%rsi,352(<input_0=%rdi)
movq % rsi, 352( % rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0, % xmm12, % rsi

# qhasm: mem64[ input_0 + 416 ] = buf
# asm 1: movq <buf=int64#2,416(<input_0=int64#1)
# asm 2: movq <buf=%rsi,416(<input_0=%rdi)
movq % rsi, 416( % rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0, % xmm6, % rsi

# qhasm: mem64[ input_0 + 480 ] = buf
# asm 1: movq <buf=int64#2,480(<input_0=int64#1)
# asm 2: movq <buf=%rsi,480(<input_0=%rdi)
movq % rsi, 480( % rdi)

# qhasm: r0 = mem64[ input_0 + 40 ] x2
# asm 1: movddup 40(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 40(<input_0=%rdi),>r0=%xmm6
movddup 40( % rdi), % xmm6

# qhasm: r1 = mem64[ input_0 + 104 ] x2
# asm 1: movddup 104(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 104(<input_0=%rdi),>r1=%xmm7
movddup 104( % rdi), % xmm7

# qhasm: r2 = mem64[ input_0 + 168 ] x2
# asm 1: movddup 168(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 168(<input_0=%rdi),>r2=%xmm8
movddup 168( % rdi), % xmm8

# qhasm: r3 = mem64[ input_0 + 232 ] x2
# asm 1: movddup 232(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 232(<input_0=%rdi),>r3=%xmm9
movddup 232( % rdi), % xmm9

# qhasm: r4 = mem64[ input_0 + 296 ] x2
# asm 1: movddup 296(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 296(<input_0=%rdi),>r4=%xmm10
movddup 296( % rdi), % xmm10

# qhasm: r5 = mem64[ input_0 + 360 ] x2
# asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11
movddup 360( % rdi), % xmm11

# qhasm: r6 = mem64[ input_0 + 424 ] x2
# asm 1: movddup 424(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 424(<input_0=%rdi),>r6=%xmm12
movddup 424( % rdi), % xmm12

# qhasm: r7 = mem64[ input_0 + 488 ] x2
# asm 1: movddup 488(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 488(<input_0=%rdi),>r7=%xmm13
movddup 488( % rdi), % xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0, % xmm9, % rsi

# qhasm: mem64[ input_0 + 40 ] = buf
# asm 1: movq <buf=int64#2,40(<input_0=int64#1)
# asm 2: movq <buf=%rsi,40(<input_0=%rdi)
movq % rsi, 40( % rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0, % xmm13, % rsi

# qhasm: mem64[ input_0 + 104 ] = buf
# asm 1: movq <buf=int64#2,104(<input_0=int64#1)
# asm 2: movq <buf=%rsi,104(<input_0=%rdi)
movq % rsi, 104( % rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0, % xmm14, % rsi

# qhasm: mem64[ input_0 + 168 ] = buf
# asm 1: movq <buf=int64#2,168(<input_0=int64#1)
# asm 2: movq <buf=%rsi,168(<input_0=%rdi)
movq % rsi, 168( % rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0, % xmm10, % rsi

# qhasm: mem64[ input_0 + 232 ] = buf
# asm 1: movq <buf=int64#2,232(<input_0=int64#1)
# asm 2: movq <buf=%rsi,232(<input_0=%rdi)
movq % rsi, 232( % rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0, % xmm11, % rsi

# qhasm: mem64[ input_0 + 296 ] = buf
# asm 1: movq <buf=int64#2,296(<input_0=int64#1)
# asm 2: movq <buf=%rsi,296(<input_0=%rdi)
movq % rsi, 296( % rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0, % xmm8, % rsi

# qhasm: mem64[ input_0 + 360 ] = buf
# asm 1: movq <buf=int64#2,360(<input_0=int64#1)
# asm 2: movq <buf=%rsi,360(<input_0=%rdi)
movq % rsi, 360( % rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0, % xmm12, % rsi

# qhasm: mem64[ input_0 + 424 ] = buf
# asm 1: movq <buf=int64#2,424(<input_0=int64#1)
# asm 2: movq <buf=%rsi,424(<input_0=%rdi)
movq % rsi, 424( % rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0, % xmm6, % rsi

# qhasm: mem64[ input_0 + 488 ] = buf
# asm 1: movq <buf=int64#2,488(<input_0=int64#1)
# asm 2: movq <buf=%rsi,488(<input_0=%rdi)
movq % rsi, 488( % rdi)

# qhasm: r0 = mem64[ input_0 + 48 ] x2
# asm 1: movddup 48(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 48(<input_0=%rdi),>r0=%xmm6
movddup 48( % rdi), % xmm6

# qhasm: r1 = mem64[ input_0 + 112 ] x2
# asm 1: movddup 112(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 112(<input_0=%rdi),>r1=%xmm7
movddup 112( % rdi), % xmm7

# qhasm: r2 = mem64[ input_0 + 176 ] x2
# asm 1: movddup 176(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 176(<input_0=%rdi),>r2=%xmm8
movddup 176( % rdi), % xmm8

# qhasm: r3 = mem64[ input_0 + 240 ] x2
# asm 1: movddup 240(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 240(<input_0=%rdi),>r3=%xmm9
movddup 240( % rdi), % xmm9

# qhasm: r4 = mem64[ input_0 + 304 ] x2
# asm 1: movddup 304(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 304(<input_0=%rdi),>r4=%xmm10
movddup 304( % rdi), % xmm10

# qhasm: r5 = mem64[ input_0 + 368 ] x2
# asm 1: movddup 368(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 368(<input_0=%rdi),>r5=%xmm11
movddup 368( % rdi), % xmm11

# qhasm: r6 = mem64[ input_0 + 432 ] x2
# asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12
movddup 432( % rdi), % xmm12

# qhasm: r7 = mem64[ input_0 + 496 ] x2
# asm 1: movddup 496(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 496(<input_0=%rdi),>r7=%xmm13
movddup 496( % rdi), % xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor % xmm15, % xmm13, % xmm13
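
# ---------------------------------------------------------------------
# Editorial note (comment added for readability; not part of the qhasm-
# generated output): each vpand/shift/vpor block in this file is one
# delta-swap step of the classic masked bit-matrix transpose (cf.
# Hacker's Delight, transpose8). For a row pair (a, b), shift distance
# d in {32, 16, 8} here ({4, 2, 1} after the masks are reloaded below)
# and mask pair m_lo = MASKi_0, m_hi = MASKi_1, it computes
#     a' = (a & m_lo) | ((b & m_lo) << d)
#     b' = ((a & m_hi) >> d) | (b & m_hi)
# (in this pass the pre-shift masking is implicit, since the shift
# itself discards the unwanted field). The movddup loads and the
# pextrq + movq stores around each pass move one 64-bit row at a
# 64-byte stride (offsets 32,96,...,480, then 40,104,...,488, etc.).
# ---------------------------------------------------------------------
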
# qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: 
vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 48 ] = buf # asm 1: movq <buf=int64#2,48(<input_0=int64#1) # asm 2: movq <buf=%rsi,48(<input_0=%rdi) movq % rsi, 48( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq 
$0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 112 ] = buf # asm 1: movq <buf=int64#2,112(<input_0=int64#1) # asm 2: movq <buf=%rsi,112(<input_0=%rdi) movq % rsi, 112( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 176 ] = buf # asm 1: movq <buf=int64#2,176(<input_0=int64#1) # asm 2: movq <buf=%rsi,176(<input_0=%rdi) movq % rsi, 176( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 240 ] = buf # asm 1: movq <buf=int64#2,240(<input_0=int64#1) # asm 2: movq <buf=%rsi,240(<input_0=%rdi) movq % rsi, 240( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 304 ] = buf # asm 1: movq <buf=int64#2,304(<input_0=int64#1) # asm 2: movq <buf=%rsi,304(<input_0=%rdi) movq % rsi, 304( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 368 ] = buf # asm 1: movq <buf=int64#2,368(<input_0=int64#1) # asm 2: movq <buf=%rsi,368(<input_0=%rdi) movq % rsi, 368( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 432 ] = buf # asm 1: movq <buf=int64#2,432(<input_0=int64#1) # asm 2: movq <buf=%rsi,432(<input_0=%rdi) movq % rsi, 432( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 496 ] = buf # asm 1: movq <buf=int64#2,496(<input_0=int64#1) # asm 2: movq <buf=%rsi,496(<input_0=%rdi) movq % rsi, 496( % rdi) # qhasm: r0 = mem64[ input_0 + 56 ] x2 # asm 1: movddup 56(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 56(<input_0=%rdi),>r0=%xmm6 movddup 56( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 120 ] x2 # asm 1: movddup 120(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 120(<input_0=%rdi),>r1=%xmm7 movddup 120( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 184 ] x2 # asm 1: movddup 184(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 184(<input_0=%rdi),>r2=%xmm8 movddup 184( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 248 ] x2 # asm 1: movddup 248(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 248(<input_0=%rdi),>r3=%xmm9 movddup 248( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 312 ] x2 # asm 1: movddup 312(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 312(<input_0=%rdi),>r4=%xmm10 movddup 312( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 376 ] x2 # asm 1: movddup 376(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 376(<input_0=%rdi),>r5=%xmm11 movddup 376( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 440 ] x2 # asm 1: movddup 440(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 440(<input_0=%rdi),>r6=%xmm12 movddup 440( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 504 ] x2 # asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13 movddup 504( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq 
$32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#1 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm0 vpand % xmm0, % xmm9, % xmm0 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#13 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm12 vpsllq $32, % xmm13, % xmm12 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#1,>r3=reg128#1 # asm 2: vpor <v10=%xmm12,<v00=%xmm0,>r3=%xmm0 vpor % xmm12, % xmm0, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor 
% xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#13 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm12 vpslld $16, % xmm11, % xmm12 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#14 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm13 vpsrld $16, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#1,>v10=reg128#14 # asm 2: vpslld $16,<r3=%xmm0,>v10=%xmm13 vpslld $16, % xmm0, % xmm13 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#14 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm13 vpslld $16, % xmm8, % xmm13 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#3 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm2 vpand % xmm2, % xmm7, % xmm2 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#2,>v10=reg128#9 # asm 2: vpslld $16,<r7=%xmm1,>v10=%xmm8 vpslld $16, % xmm1, % xmm8 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: r5 = v00 | v10 # asm 1: vpor 
<v10=reg128#9,<v00=reg128#3,>r5=reg128#3 # asm 2: vpor <v10=%xmm8,<v00=%xmm2,>r5=%xmm2 vpor % xmm8, % xmm2, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#13,>v10=reg128#8 # asm 2: vpsllw $8,<r1=%xmm12,>v10=%xmm7 vpsllw $8, % xmm12, % xmm7 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#10,>v01=reg128#9 # asm 2: vpsrlw $8,<r0=%xmm9,>v01=%xmm8 vpsrlw $8, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#1,>v10=reg128#10 # asm 2: vpsllw $8,<r3=%xmm0,>v10=%xmm9 vpsllw $8, % xmm0, % xmm9 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#3,>v10=reg128#12 # asm 2: vpsllw $8,<r5=%xmm2,>v10=%xmm11 vpsllw $8, % xmm2, % xmm11 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#11,>v01=reg128#11 # asm 2: vpsrlw $8,<r4=%xmm10,>v01=%xmm10 vpsrlw $8, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#5 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm4 vpand % xmm4, % xmm6, % xmm4 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#2,>v10=reg128#11 # asm 2: vpsllw $8,<r7=%xmm1,>v10=%xmm10 vpsllw $8, % xmm1, % xmm10 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand 
<mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#11,<v00=reg128#5,>r6=reg128#5 # asm 2: vpor <v10=%xmm10,<v00=%xmm4,>r6=%xmm4 vpor % xmm10, % xmm4, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#4,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm3,>buf=%rsi pextrq $0x0, % xmm3, % rsi # qhasm: mem64[ input_0 + 56 ] = buf # asm 1: movq <buf=int64#2,56(<input_0=int64#1) # asm 2: movq <buf=%rsi,56(<input_0=%rdi) movq % rsi, 56( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#8,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm7,>buf=%rsi pextrq $0x0, % xmm7, % rsi # qhasm: mem64[ input_0 + 120 ] = buf # asm 1: movq <buf=int64#2,120(<input_0=int64#1) # asm 2: movq <buf=%rsi,120(<input_0=%rdi) movq % rsi, 120( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 184 ] = buf # asm 1: movq <buf=int64#2,184(<input_0=int64#1) # asm 2: movq <buf=%rsi,184(<input_0=%rdi) movq % rsi, 184( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#1,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm0,>buf=%rsi pextrq $0x0, % xmm0, % rsi # qhasm: mem64[ input_0 + 248 ] = buf # asm 1: movq <buf=int64#2,248(<input_0=int64#1) # asm 2: movq <buf=%rsi,248(<input_0=%rdi) movq % rsi, 248( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 312 ] = buf # asm 1: movq <buf=int64#2,312(<input_0=int64#1) # asm 2: movq <buf=%rsi,312(<input_0=%rdi) movq % rsi, 312( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#3,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm2,>buf=%rsi pextrq $0x0, % xmm2, % rsi # qhasm: mem64[ input_0 + 376 ] = buf # asm 1: movq <buf=int64#2,376(<input_0=int64#1) # asm 2: movq <buf=%rsi,376(<input_0=%rdi) movq % rsi, 376( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#5,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm4,>buf=%rsi pextrq $0x0, % xmm4, % rsi # qhasm: mem64[ input_0 + 440 ] = buf # asm 1: movq <buf=int64#2,440(<input_0=int64#1) # asm 2: movq <buf=%rsi,440(<input_0=%rdi) movq % rsi, 440( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#2,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm1,>buf=%rsi pextrq $0x0, % xmm1, % rsi # qhasm: mem64[ input_0 + 504 ] = buf # asm 1: movq <buf=int64#2,504(<input_0=int64#1) # asm 2: movq <buf=%rsi,504(<input_0=%rdi) movq % rsi, 504( % rdi) # qhasm: mask0 aligned= mem128[ MASK2_0 ] # asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0 movdqa MASK2_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK2_1 ] # asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1 movdqa MASK2_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK1_0 ] # asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2 movdqa MASK1_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK1_1 ] # asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3 movdqa MASK1_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK0_0 ] # asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4 movdqa MASK0_0( % rip), % 
xmm4 # qhasm: mask5 aligned= mem128[ MASK0_1 ] # asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5 movdqa MASK0_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 8(<input_0=%rdi),>r1=%xmm7 movddup 8( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 16(<input_0=%rdi),>r2=%xmm8 movddup 16( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 24(<input_0=%rdi),>r3=%xmm9 movddup 24( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 32(<input_0=%rdi),>r4=%xmm10 movddup 32( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 40 ] x2 # asm 1: movddup 40(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 40(<input_0=%rdi),>r5=%xmm11 movddup 40( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 48 ] x2 # asm 1: movddup 48(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 48(<input_0=%rdi),>r6=%xmm12 movddup 48( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 56 ] x2 # asm 1: movddup 56(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 56(<input_0=%rdi),>r7=%xmm13 movddup 56( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | 
v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq 
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand % xmm2, % xmm12, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand % xmm3, % xmm10, % xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, % xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand % xmm2, % xmm8, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand % xmm3, % xmm6, % xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, % xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand % xmm2, % xmm9, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand % xmm3, % xmm7, % xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, % xmm7
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand % xmm4, % xmm14, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand % xmm5, % xmm13, % xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, % xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand % xmm4, % xmm10, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand % xmm5, % xmm11, % xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, % xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand % xmm4, % xmm8, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand % xmm5, % xmm12, % xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, % xmm12
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand % xmm4, % xmm7, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand % xmm5, % xmm6, % xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, % xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq % xmm13, % xmm9, % xmm7

# qhasm: mem128[ input_0 + 0 ] = t0
# asm 1: movdqu <t0=reg128#8,0(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,0(<input_0=%rdi)
movdqu % xmm7, 0( % rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq % xmm10, % xmm14, % xmm7

# qhasm: mem128[ input_0 + 16 ] = t0
# asm 1: movdqu <t0=reg128#8,16(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,16(<input_0=%rdi)
movdqu % xmm7, 16( % rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq % xmm8, % xmm11, % xmm7

# qhasm: mem128[ input_0 + 32 ] = t0
# asm 1: movdqu <t0=reg128#8,32(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,32(<input_0=%rdi)
movdqu % xmm7, 32( % rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq % xmm6, % xmm12, % xmm6

# qhasm: mem128[ input_0 + 48 ] = t0
# asm 1: movdqu <t0=reg128#7,48(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,48(<input_0=%rdi)
movdqu % xmm6, 48( % rdi)

# qhasm: r0 = mem64[ input_0 + 64 ] x2
# asm 1: movddup 64(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 64(<input_0=%rdi),>r0=%xmm6
movddup 64( % rdi), % xmm6

# qhasm: r1 = mem64[ input_0 + 72 ] x2
# asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7
movddup 72( % rdi), % xmm7

# qhasm: r2 = mem64[ input_0 + 80 ] x2
# asm 1: movddup 80(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 80(<input_0=%rdi),>r2=%xmm8
movddup 80( % rdi), % xmm8

# qhasm: r3 = mem64[ input_0 + 88 ] x2
# asm 1: movddup 88(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 88(<input_0=%rdi),>r3=%xmm9
movddup 88( % rdi), % xmm9
# qhasm: r4 = mem64[ input_0 + 96 ] x2
# asm 1: movddup 96(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 96(<input_0=%rdi),>r4=%xmm10
movddup 96( % rdi), % xmm10

# qhasm: r5 = mem64[ input_0 + 104 ] x2
# asm 1: movddup 104(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 104(<input_0=%rdi),>r5=%xmm11
movddup 104( % rdi), % xmm11

# qhasm: r6 = mem64[ input_0 + 112 ] x2
# asm 1: movddup 112(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 112(<input_0=%rdi),>r6=%xmm12
movddup 112( % rdi), % xmm12

# qhasm: r7 = mem64[ input_0 + 120 ] x2
# asm 1: movddup 120(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 120(<input_0=%rdi),>r7=%xmm13
movddup 120( % rdi), % xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand % xmm0, % xmm10, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand % xmm1, % xmm6, % xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, % xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand % xmm0, % xmm11, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand % xmm1, % xmm7, % xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, % xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand % xmm0, % xmm12, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand % xmm1, % xmm8, % xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, % xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand % xmm0, % xmm13, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand % xmm1, % xmm9, % xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, % xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand % xmm2, % xmm11, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand % xmm3, % xmm14, % xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, % xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand % xmm2, % xmm12, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15
# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand % xmm3, % xmm10, % xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, % xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand % xmm2, % xmm8, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand % xmm3, % xmm6, % xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, % xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand % xmm2, % xmm9, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand % xmm3, % xmm7, % xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, % xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand % xmm4, % xmm14, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15
# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand % xmm5, % xmm13, % xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, % xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand % xmm4, % xmm10, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand % xmm5, % xmm11, % xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, % xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand % xmm4, % xmm8, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand % xmm5, % xmm12, % xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, % xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand % xmm4, % xmm7, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15
# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand % xmm5, % xmm6, % xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, % xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq % xmm13, % xmm9, % xmm7

# qhasm: mem128[ input_0 + 64 ] = t0
# asm 1: movdqu <t0=reg128#8,64(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,64(<input_0=%rdi)
movdqu % xmm7, 64( % rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq % xmm10, % xmm14, % xmm7

# qhasm: mem128[ input_0 + 80 ] = t0
# asm 1: movdqu <t0=reg128#8,80(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,80(<input_0=%rdi)
movdqu % xmm7, 80( % rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq % xmm8, % xmm11, % xmm7

# qhasm: mem128[ input_0 + 96 ] = t0
# asm 1: movdqu <t0=reg128#8,96(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,96(<input_0=%rdi)
movdqu % xmm7, 96( % rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq % xmm6, % xmm12, % xmm6

# qhasm: mem128[ input_0 + 112 ] = t0
# asm 1: movdqu <t0=reg128#7,112(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,112(<input_0=%rdi)
movdqu % xmm6, 112( % rdi)

# qhasm: r0 = mem64[ input_0 + 128 ] x2
# asm 1: movddup 128(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 128(<input_0=%rdi),>r0=%xmm6
movddup 128( % rdi), % xmm6

# qhasm: r1 = mem64[ input_0 + 136 ] x2
# asm 1: movddup 136(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 136(<input_0=%rdi),>r1=%xmm7
movddup 136( % rdi), % xmm7

# qhasm: r2 = mem64[ input_0 + 144 ] x2
# asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8
movddup 144( % rdi), % xmm8

# qhasm: r3 = mem64[ input_0 + 152 ] x2
# asm 1: movddup 152(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 152(<input_0=%rdi),>r3=%xmm9
movddup 152( % rdi), % xmm9

# qhasm: r4 = mem64[ input_0 + 160 ] x2
# asm 1: movddup 160(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 160(<input_0=%rdi),>r4=%xmm10
movddup 160( % rdi), % xmm10

# qhasm: r5 = mem64[ input_0 + 168 ] x2
# asm 1: movddup 168(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 168(<input_0=%rdi),>r5=%xmm11
movddup 168( % rdi), % xmm11

# qhasm: r6 = mem64[ input_0 + 176 ] x2
# asm 1: movddup 176(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 176(<input_0=%rdi),>r6=%xmm12
movddup 176( % rdi), % xmm12

# qhasm: r7 = mem64[ input_0 + 184 ] x2
# asm 1: movddup 184(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 184(<input_0=%rdi),>r7=%xmm13
movddup 184( % rdi), % xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14
# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand % xmm0, % xmm10, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand % xmm1, % xmm6, % xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, % xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand % xmm0, % xmm11, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand % xmm1, % xmm7, % xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, % xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand % xmm0, % xmm12, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand % xmm1, % xmm8, % xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, % xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12
# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand % xmm0, % xmm13, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand % xmm1, % xmm9, % xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, % xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand % xmm2, % xmm11, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand % xmm3, % xmm14, % xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, % xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand % xmm2, % xmm12, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand % xmm3, % xmm10, % xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, % xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor % xmm12, % xmm10, % xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand % xmm2, % xmm8, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand % xmm3, % xmm6, % xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, % xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand % xmm2, % xmm9, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand % xmm3, % xmm7, % xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, % xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand % xmm4, % xmm14, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand % xmm5, % xmm13, % xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, % xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor % xmm14, % xmm13, % xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand % xmm4, % xmm10, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand % xmm5, % xmm11, % xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, % xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand % xmm4, % xmm8, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand % xmm5, % xmm12, % xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, % xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand % xmm4, % xmm7, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand % xmm5, % xmm6, % xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, % xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor % xmm7, % xmm6, % xmm6
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq % xmm13, % xmm9, % xmm7

# qhasm: mem128[ input_0 + 128 ] = t0
# asm 1: movdqu <t0=reg128#8,128(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,128(<input_0=%rdi)
movdqu % xmm7, 128( % rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq % xmm10, % xmm14, % xmm7

# qhasm: mem128[ input_0 + 144 ] = t0
# asm 1: movdqu <t0=reg128#8,144(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,144(<input_0=%rdi)
movdqu % xmm7, 144( % rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq % xmm8, % xmm11, % xmm7

# qhasm: mem128[ input_0 + 160 ] = t0
# asm 1: movdqu <t0=reg128#8,160(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,160(<input_0=%rdi)
movdqu % xmm7, 160( % rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq % xmm6, % xmm12, % xmm6

# qhasm: mem128[ input_0 + 176 ] = t0
# asm 1: movdqu <t0=reg128#7,176(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,176(<input_0=%rdi)
movdqu % xmm6, 176( % rdi)

# qhasm: r0 = mem64[ input_0 + 192 ] x2
# asm 1: movddup 192(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 192(<input_0=%rdi),>r0=%xmm6
movddup 192( % rdi), % xmm6

# qhasm: r1 = mem64[ input_0 + 200 ] x2
# asm 1: movddup 200(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 200(<input_0=%rdi),>r1=%xmm7
movddup 200( % rdi), % xmm7

# qhasm: r2 = mem64[ input_0 + 208 ] x2
# asm 1: movddup 208(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 208(<input_0=%rdi),>r2=%xmm8
movddup 208( % rdi), % xmm8

# qhasm: r3 = mem64[ input_0 + 216 ] x2
# asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9
movddup 216( % rdi), % xmm9

# qhasm: r4 = mem64[ input_0 + 224 ] x2
# asm 1: movddup 224(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 224(<input_0=%rdi),>r4=%xmm10
movddup 224( % rdi), % xmm10

# qhasm: r5 = mem64[ input_0 + 232 ] x2
# asm 1: movddup 232(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 232(<input_0=%rdi),>r5=%xmm11
movddup 232( % rdi), % xmm11

# qhasm: r6 = mem64[ input_0 + 240 ] x2
# asm 1: movddup 240(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 240(<input_0=%rdi),>r6=%xmm12
movddup 240( % rdi), % xmm12

# qhasm: r7 = mem64[ input_0 + 248 ] x2
# asm 1: movddup 248(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 248(<input_0=%rdi),>r7=%xmm13
movddup 248( % rdi), % xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand % xmm0, % xmm10, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand % xmm1, % xmm6, % xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, % xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand % xmm0, % xmm11, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand % xmm1, % xmm7, % xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, % xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand % xmm0, % xmm12, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand % xmm1, % xmm8, % xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, % xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand % xmm0, % xmm13, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand % xmm1, % xmm9, % xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, % xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand % xmm2, % xmm11, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand % xmm3, % xmm14, % xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, % xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand % xmm2, % xmm12, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand % xmm3, % xmm10, % xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, % xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand % xmm2, % xmm8, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand % xmm3, % xmm6, % xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, % xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand % xmm2, % xmm9, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand % xmm3, % xmm7, % xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, % xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand % xmm4, % xmm14, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand % xmm5, % xmm13, % xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, % xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand % xmm4, % xmm10, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand % xmm5, % xmm11, % xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, % xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand % xmm4, % xmm8, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand % xmm5, % xmm12, % xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, % xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand % xmm4, % xmm7, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand % xmm5, % xmm6, % xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, % xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq % xmm13, % xmm9, % xmm7

# qhasm: mem128[ input_0 + 192 ] = t0
# asm 1: movdqu <t0=reg128#8,192(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,192(<input_0=%rdi)
movdqu % xmm7, 192( % rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq % xmm10, % xmm14, % xmm7

# qhasm: mem128[ input_0 + 208 ] = t0
# asm 1: movdqu <t0=reg128#8,208(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,208(<input_0=%rdi)
movdqu % xmm7, 208( % rdi)
<t0=reg128#8,208(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,208(<input_0=%rdi) movdqu % xmm7, 208( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 224 ] = t0 # asm 1: movdqu <t0=reg128#8,224(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,224(<input_0=%rdi) movdqu % xmm7, 224( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 240 ] = t0 # asm 1: movdqu <t0=reg128#7,240(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,240(<input_0=%rdi) movdqu % xmm6, 240( % rdi) # qhasm: r0 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 256(<input_0=%rdi),>r0=%xmm6 movddup 256( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 264(<input_0=%rdi),>r1=%xmm7 movddup 264( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 272(<input_0=%rdi),>r2=%xmm8 movddup 272( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 280(<input_0=%rdi),>r3=%xmm9 movddup 280( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 296 ] x2 # asm 1: movddup 296(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 296(<input_0=%rdi),>r5=%xmm11 movddup 296( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 304 ] x2 # asm 1: movddup 304(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 304(<input_0=%rdi),>r6=%xmm12 movddup 304( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 312 ] x2 # asm 1: movddup 312(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 312(<input_0=%rdi),>r7=%xmm13 movddup 312( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 
v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand 
<mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand 
<mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 256 ] = t0 # asm 1: movdqu <t0=reg128#8,256(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,256(<input_0=%rdi) movdqu % xmm7, 256( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 272 ] = t0 # asm 1: movdqu <t0=reg128#8,272(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,272(<input_0=%rdi) movdqu % xmm7, 272( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 288 ] = t0 # asm 1: movdqu <t0=reg128#8,288(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,288(<input_0=%rdi) movdqu % xmm7, 288( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 304 ] = t0 
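# Note: vpunpcklqdq concatenates the low 64-bit halves of its two
# sources (dst.lo = src1.lo, dst.hi = src2.lo in the AT&T operand
# order vpunpcklqdq src2, src1, dst), so each 16-byte store writes the
# results of two registers back to back.  Nothing is lost by dropping
# the high lanes: the inputs were movddup broadcasts, so both lanes
# hold the same data throughout.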
# asm 1: movdqu <t0=reg128#7,304(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,304(<input_0=%rdi) movdqu % xmm6, 304( % rdi) # qhasm: r0 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 320(<input_0=%rdi),>r0=%xmm6 movddup 320( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 328(<input_0=%rdi),>r1=%xmm7 movddup 328( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 336(<input_0=%rdi),>r2=%xmm8 movddup 336( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 344(<input_0=%rdi),>r3=%xmm9 movddup 344( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 352(<input_0=%rdi),>r4=%xmm10 movddup 352( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 360 ] x2 # asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11 movddup 360( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 368 ] x2 # asm 1: movddup 368(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 368(<input_0=%rdi),>r6=%xmm12 movddup 368( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 376 ] x2 # asm 1: movddup 376(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 376(<input_0=%rdi),>r7=%xmm13 movddup 376( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 
psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # 
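# Note: these are the d = 2 swaps (round 2) for the block loaded from
# offset 320: mask2/mask3 select 2-bit groups, and the swap partners
# are the registers at distance 2, i.e. (r0,r2), (r1,r3), (r4,r6),
# (r5,r7); see the swap_round sketch near offset 192 above.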
asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 320 ] = t0 # asm 1: movdqu <t0=reg128#8,320(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,320(<input_0=%rdi) movdqu % xmm7, 320( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 336 ] = t0 # asm 1: movdqu <t0=reg128#8,336(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,336(<input_0=%rdi) movdqu % xmm7, 336( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 352 ] = t0 # asm 1: movdqu <t0=reg128#8,352(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,352(<input_0=%rdi) movdqu % xmm7, 352( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 368 ] = t0 # asm 1: movdqu <t0=reg128#7,368(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,368(<input_0=%rdi) movdqu % xmm6, 368( % rdi) # qhasm: r0 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 384(<input_0=%rdi),>r0=%xmm6 movddup 384( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 392(<input_0=%rdi),>r1=%xmm7 movddup 392( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 400(<input_0=%rdi),>r2=%xmm8 movddup 400( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 
408(<input_0=%rdi),>r3=%xmm9 movddup 408( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 416(<input_0=%rdi),>r4=%xmm10 movddup 416( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 424 ] x2 # asm 1: movddup 424(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 424(<input_0=%rdi),>r5=%xmm11 movddup 424( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 432 ] x2 # asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12 movddup 432( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 440 ] x2 # asm 1: movddup 440(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 440(<input_0=%rdi),>r7=%xmm13 movddup 440( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # 
qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # 
asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq 
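# Note: the d = 1 round below (mask4/mask5) exchanges single bits
# between the adjacent pairs (r0,r1), (r2,r3), (r4,r5), (r6,r7) and
# completes the three-round interleave for the block at offset 384.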
$1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % 
xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 384 ] = t0 # asm 1: movdqu <t0=reg128#8,384(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,384(<input_0=%rdi) movdqu % xmm7, 384( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 400 ] = t0 # asm 1: movdqu <t0=reg128#8,400(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,400(<input_0=%rdi) movdqu % xmm7, 400( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 416 ] = t0 # asm 1: movdqu <t0=reg128#8,416(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,416(<input_0=%rdi) movdqu % xmm7, 416( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 432 ] = t0 # asm 1: movdqu <t0=reg128#7,432(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,432(<input_0=%rdi) movdqu % xmm6, 432( % rdi) # qhasm: r0 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 448(<input_0=%rdi),>r0=%xmm6 movddup 448( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 456(<input_0=%rdi),>r1=%xmm7 movddup 456( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 464(<input_0=%rdi),>r2=%xmm8 movddup 464( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 472(<input_0=%rdi),>r3=%xmm9 movddup 472( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 480(<input_0=%rdi),>r4=%xmm10 movddup 480( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 488 ] x2 # asm 1: movddup 488(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 488(<input_0=%rdi),>r5=%xmm11 movddup 488( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 496 ] x2 # asm 1: movddup 496(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 496(<input_0=%rdi),>r6=%xmm12 movddup 496( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 504 ] x2 # asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13 movddup 504( % rdi), % xmm13 # 
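# Note: movddup reads one quadword from memory and duplicates it into
# both 64-bit lanes of the destination, so the loads above fetch
# r0..r7 for the last block visible here (offsets 448..504) with
# identical low and high lanes, matching the earlier blocks.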
qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % 
xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>r3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>r3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor 
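# Note: for this block the register allocator appears to start
# recycling the mask registers as temporaries once each mask has been
# used for the last time (e.g. v10 = r7 & mask0 lands in
# reg128#1/%xmm0, the register that held mask0), which is why the
# register numbering departs from the pattern of the earlier blocks.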
<v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<r7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>r5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>r5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<r1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<r0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % 
xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<r3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<r5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<r4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<r7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>r6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>r6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % 
xmm1 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#8,<r0=reg128#4,>t0=reg128#4 # asm 2: vpunpcklqdq <r1=%xmm7,<r0=%xmm3,>t0=%xmm3 vpunpcklqdq % xmm7, % xmm3, % xmm3 # qhasm: mem128[ input_0 + 448 ] = t0 # asm 1: movdqu <t0=reg128#4,448(<input_0=int64#1) # asm 2: movdqu <t0=%xmm3,448(<input_0=%rdi) movdqu % xmm3, 448( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#1,<r2=reg128#9,>t0=reg128#1 # asm 2: vpunpcklqdq <r3=%xmm0,<r2=%xmm8,>t0=%xmm0 vpunpcklqdq % xmm0, % xmm8, % xmm0 # qhasm: mem128[ input_0 + 464 ] = t0 # asm 1: movdqu <t0=reg128#1,464(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,464(<input_0=%rdi) movdqu % xmm0, 464( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#3,<r4=reg128#10,>t0=reg128#1 # asm 2: vpunpcklqdq <r5=%xmm2,<r4=%xmm9,>t0=%xmm0 vpunpcklqdq % xmm2, % xmm9, % xmm0 # qhasm: mem128[ input_0 + 480 ] = t0 # asm 1: movdqu <t0=reg128#1,480(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,480(<input_0=%rdi) movdqu % xmm0, 480( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#2,<r6=reg128#5,>t0=reg128#1 # asm 2: vpunpcklqdq <r7=%xmm1,<r6=%xmm4,>t0=%xmm0 vpunpcklqdq % xmm1, % xmm4, % xmm0 # qhasm: mem128[ input_0 + 496 ] = t0 # asm 1: movdqu <t0=reg128#1,496(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,496(<input_0=%rdi) movdqu % xmm0, 496( % rdi) # qhasm: return add % r11, % rsp ret
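The block ending above is a bit-matrix transpose network: three butterfly levels pair registers at distance 4, 2, 1 and swap bit groups of width 4, 2, 1 inside each 64-bit lane, using a complementary mask pair plus psllq/psrlq/vpor at every level. A minimal C sketch of one butterfly level on plain 64-bit words (the name `butterfly` and the word-pair framing are illustrative, not the crate's API; the mask registers were loaded before this excerpt, presumably with MASK*_0/MASK*_1 pairs like those in the consts.S tables below):

#include <stdint.h>

/* Minimal sketch of one butterfly level of the bit-matrix transpose
 * unrolled above, on 64-bit words instead of XMM registers.  mask_lo
 * selects the low half of each 2*s-bit group, mask_hi the high half;
 * the three levels above use s = 4, 2, 1 while pairing registers at
 * distance 4, 2, 1. */
static void butterfly(uint64_t *lo, uint64_t *hi,
                      uint64_t mask_lo, uint64_t mask_hi, unsigned s)
{
    uint64_t v00 = *lo & mask_lo;          /* bits of lo that stay put      */
    uint64_t v10 = (*hi & mask_lo) << s;   /* bits of hi moved up into lo   */
    uint64_t v01 = (*lo & mask_hi) >> s;   /* bits of lo moved down into hi */
    uint64_t v11 = *hi & mask_hi;          /* bits of hi that stay put      */
    *lo = v00 | v10;
    *hi = v01 | v11;
}

With mask_lo = 0x0F0F0F0F0F0F0F0F, mask_hi = 0xF0F0F0F0F0F0F0F0 and s = 4 this is exactly the r0/r4 step at the top of the excerpt; the assembly runs the same dataflow over all four register pairs per level and then stores the results with vpunpcklqdq/movdqu.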
mktmansour/MKT-KSA-Geolocation-Security
2,712
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896/avx2/consts.S
#include "namespace.h" #if defined(__APPLE__) #define ASM_HIDDEN .private_extern #else #define ASM_HIDDEN .hidden #endif #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) .data ASM_HIDDEN MASK0_0 ASM_HIDDEN MASK0_1 ASM_HIDDEN MASK1_0 ASM_HIDDEN MASK1_1 ASM_HIDDEN MASK2_0 ASM_HIDDEN MASK2_1 ASM_HIDDEN MASK3_0 ASM_HIDDEN MASK3_1 ASM_HIDDEN MASK4_0 ASM_HIDDEN MASK4_1 ASM_HIDDEN MASK5_0 ASM_HIDDEN MASK5_1 .globl MASK0_0 .globl MASK0_1 .globl MASK1_0 .globl MASK1_1 .globl MASK2_0 .globl MASK2_1 .globl MASK3_0 .globl MASK3_1 .globl MASK4_0 .globl MASK4_1 .globl MASK5_0 .globl MASK5_1 .p2align 5 MASK0_0: .quad 0x5555555555555555, 0x5555555555555555, 0x5555555555555555, 0x5555555555555555 MASK0_1: .quad 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA MASK1_0: .quad 0x3333333333333333, 0x3333333333333333, 0x3333333333333333, 0x3333333333333333 MASK1_1: .quad 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC MASK2_0: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F MASK2_1: .quad 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0 MASK3_0: .quad 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF MASK3_1: .quad 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00 MASK4_0: .quad 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF MASK4_1: .quad 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000 MASK5_0: .quad 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF MASK5_1: .quad 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000
mktmansour/MKT-KSA-Geolocation-Security
14,915
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896/avx2/update_asm.S
#include "namespace.h" #define update_asm CRYPTO_NAMESPACE(update_asm) #define _update_asm _CRYPTO_NAMESPACE(update_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 s0 # qhasm: int64 s1 # qhasm: int64 s2 # qhasm: enter update_asm .p2align 5 .global _update_asm .global update_asm _update_asm: update_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: s2 = input_1 # asm 1: mov <input_1=int64#2,>s2=int64#2 # asm 2: mov <input_1=%rsi,>s2=%rsi mov % rsi, % rsi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ 
input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 
0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd 
$1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq 
<s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: return add % r11, % rsp ret
mktmansour/MKT-KSA-Geolocation-Security
53,565
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896/avx2/vec128_mul_asm.S
#include "namespace.h" #define vec128_mul_asm CRYPTO_NAMESPACE(vec128_mul_asm) #define _vec128_mul_asm _CRYPTO_NAMESPACE(vec128_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 b6 # qhasm: reg256 b7 # qhasm: reg256 b8 # qhasm: reg256 b9 # qhasm: reg256 b10 # qhasm: reg256 b11 # qhasm: reg256 b12 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: reg128 h0 # qhasm: reg128 h1 # qhasm: reg128 h2 # qhasm: reg128 h3 # qhasm: reg128 h4 # qhasm: reg128 h5 # qhasm: reg128 h6 # qhasm: reg128 h7 # qhasm: reg128 h8 # qhasm: reg128 h9 # qhasm: reg128 h10 # qhasm: reg128 h11 # qhasm: reg128 h12 # qhasm: reg128 h13 # qhasm: reg128 h14 # qhasm: reg128 h15 # qhasm: reg128 h16 # qhasm: reg128 h17 # qhasm: reg128 h18 # qhasm: reg128 h19 # qhasm: reg128 h20 # qhasm: reg128 h21 # qhasm: reg128 h22 # qhasm: reg128 h23 # qhasm: reg128 h24 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: enter vec128_mul_asm .p2align 5 .global _vec128_mul_asm .global vec128_mul_asm _vec128_mul_asm: vec128_mul_asm: mov % rsp, % r11 and $31, % r11 add $608, % r11 sub % r11, % rsp # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#5 # asm 2: leaq <buf=0(%rsp),>ptr=%r8 leaq 0( % rsp), % r8 # qhasm: tmp = input_3 # asm 1: mov <input_3=int64#4,>tmp=int64#6 # asm 2: mov <input_3=%rcx,>tmp=%r9 mov % rcx, % r9 # qhasm: tmp *= 12 # asm 1: imulq $12,<tmp=int64#6,>tmp=int64#6 # asm 2: imulq $12,<tmp=%r9,>tmp=%r9 imulq $12, % r9, % r9 # qhasm: input_2 += tmp # asm 1: add <tmp=int64#6,<input_2=int64#3 # asm 2: add <tmp=%r9,<input_2=%rdx add % r9, % rdx # qhasm: b12 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b12=reg256#1 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b12=%ymm0 vbroadcasti128 0( % rdx), % ymm0 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: a6 = a6 ^ a6 # asm 1: vpxor <a6=reg256#2,<a6=reg256#2,>a6=reg256#2 # asm 2: vpxor <a6=%ymm1,<a6=%ymm1,>a6=%ymm1 vpxor % ymm1, % ymm1, % ymm1 # qhasm: a6[0] = mem128[ input_1 + 96 ] # asm 1: vinsertf128 $0x0,96(<input_1=int64#2),<a6=reg256#2,<a6=reg256#2 # asm 2: vinsertf128 $0x0,96(<input_1=%rsi),<a6=%ymm1,<a6=%ymm1 vinsertf128 $0x0, 96( % rsi), % ymm1, % ymm1 # qhasm: r18 = b12 & a6 # asm 1: vpand <b12=reg256#1,<a6=reg256#2,>r18=reg256#3 # asm 2: vpand <b12=%ymm0,<a6=%ymm1,>r18=%ymm2 vpand % ymm0, % ymm1, % ymm2 # qhasm: mem256[ ptr + 576 ] = r18 # asm 1: vmovupd <r18=reg256#3,576(<ptr=int64#5) # asm 2: 
vmovupd <r18=%ymm2,576(<ptr=%r8) vmovupd % ymm2, 576( % r8) # qhasm: a5[0] = mem128[ input_1 + 80 ] # asm 1: vinsertf128 $0x0,80(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x0,80(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x0, 80( % rsi), % ymm2, % ymm2 # qhasm: a5[1] = mem128[ input_1 + 192 ] # asm 1: vinsertf128 $0x1,192(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x1,192(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x1, 192( % rsi), % ymm2, % ymm2 # qhasm: r17 = b12 & a5 # asm 1: vpand <b12=reg256#1,<a5=reg256#3,>r17=reg256#4 # asm 2: vpand <b12=%ymm0,<a5=%ymm2,>r17=%ymm3 vpand % ymm0, % ymm2, % ymm3 # qhasm: a4[0] = mem128[ input_1 + 64 ] # asm 1: vinsertf128 $0x0,64(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x0,64(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x0, 64( % rsi), % ymm4, % ymm4 # qhasm: a4[1] = mem128[ input_1 + 176 ] # asm 1: vinsertf128 $0x1,176(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x1,176(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x1, 176( % rsi), % ymm4, % ymm4 # qhasm: r16 = b12 & a4 # asm 1: vpand <b12=reg256#1,<a4=reg256#5,>r16=reg256#6 # asm 2: vpand <b12=%ymm0,<a4=%ymm4,>r16=%ymm5 vpand % ymm0, % ymm4, % ymm5 # qhasm: a3[0] = mem128[ input_1 + 48 ] # asm 1: vinsertf128 $0x0,48(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x0,48(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x0, 48( % rsi), % ymm6, % ymm6 # qhasm: a3[1] = mem128[ input_1 + 160 ] # asm 1: vinsertf128 $0x1,160(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x1,160(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x1, 160( % rsi), % ymm6, % ymm6 # qhasm: r15 = b12 & a3 # asm 1: vpand <b12=reg256#1,<a3=reg256#7,>r15=reg256#8 # asm 2: vpand <b12=%ymm0,<a3=%ymm6,>r15=%ymm7 vpand % ymm0, % ymm6, % ymm7 # qhasm: a2[0] = mem128[ input_1 + 32 ] # asm 1: vinsertf128 $0x0,32(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x0,32(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x0, 32( % rsi), % ymm8, % ymm8 # qhasm: a2[1] = mem128[ input_1 + 144 ] # asm 1: vinsertf128 $0x1,144(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x1,144(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x1, 144( % rsi), % ymm8, % ymm8 # qhasm: r14 = b12 & a2 # asm 1: vpand <b12=reg256#1,<a2=reg256#9,>r14=reg256#10 # asm 2: vpand <b12=%ymm0,<a2=%ymm8,>r14=%ymm9 vpand % ymm0, % ymm8, % ymm9 # qhasm: a1[0] = mem128[ input_1 + 16 ] # asm 1: vinsertf128 $0x0,16(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x0,16(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x0, 16( % rsi), % ymm10, % ymm10 # qhasm: a1[1] = mem128[ input_1 + 128 ] # asm 1: vinsertf128 $0x1,128(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x1,128(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x1, 128( % rsi), % ymm10, % ymm10 # qhasm: r13 = b12 & a1 # asm 1: vpand <b12=reg256#1,<a1=reg256#11,>r13=reg256#12 # asm 2: vpand <b12=%ymm0,<a1=%ymm10,>r13=%ymm11 vpand % ymm0, % ymm10, % ymm11 # qhasm: a0[0] = mem128[ input_1 + 0 ] # asm 1: vinsertf128 $0x0,0(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x0,0(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x0, 0( % rsi), % ymm12, % ymm12 # qhasm: a0[1] = mem128[ input_1 + 112 ] # asm 1: vinsertf128 $0x1,112(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x1,112(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x1, 112( % rsi), % ymm12, % ymm12 
# qhasm: r12 = b12 & a0 # asm 1: vpand <b12=reg256#1,<a0=reg256#13,>r12=reg256#1 # asm 2: vpand <b12=%ymm0,<a0=%ymm12,>r12=%ymm0 vpand % ymm0, % ymm12, % ymm0 # qhasm: b11 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b11=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b11=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b11 & a6 # asm 1: vpand <b11=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b11=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#4,<r17=reg256#4 # asm 2: vpxor <r=%ymm14,<r17=%ymm3,<r17=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 544 ] = r17 # asm 1: vmovupd <r17=reg256#4,544(<ptr=int64#5) # asm 2: vmovupd <r17=%ymm3,544(<ptr=%r8) vmovupd % ymm3, 544( % r8) # qhasm: r = b11 & a5 # asm 1: vpand <b11=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#4,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm3,<r16=%ymm5,<r16=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b11 & a4 # asm 1: vpand <b11=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#4,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm3,<r15=%ymm7,<r15=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b11 & a3 # asm 1: vpand <b11=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#4,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm3,<r14=%ymm9,<r14=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b11 & a2 # asm 1: vpand <b11=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#4,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm3,<r13=%ymm11,<r13=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b11 & a1 # asm 1: vpand <b11=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#4,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm3,<r12=%ymm0,<r12=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r11 = b11 & a0 # asm 1: vpand <b11=reg256#14,<a0=reg256#13,>r11=reg256#4 # asm 2: vpand <b11=%ymm13,<a0=%ymm12,>r11=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b10 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b10=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b10=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b10 & a6 # asm 1: vpand <b10=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b10=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm14,<r16=%ymm5,<r16=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 512 ] = r16 # asm 1: vmovupd <r16=reg256#6,512(<ptr=int64#5) # asm 2: vmovupd <r16=%ymm5,512(<ptr=%r8) vmovupd % ymm5, 512( % r8) # qhasm: r = b10 & a5 # asm 1: vpand <b10=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # 
qhasm: r15 ^= r # asm 1: vpxor <r=reg256#6,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm5,<r15=%ymm7,<r15=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b10 & a4 # asm 1: vpand <b10=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#6,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm5,<r14=%ymm9,<r14=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b10 & a3 # asm 1: vpand <b10=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#6,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm5,<r13=%ymm11,<r13=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b10 & a2 # asm 1: vpand <b10=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#6,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm5,<r12=%ymm0,<r12=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b10 & a1 # asm 1: vpand <b10=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#6,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm5,<r11=%ymm3,<r11=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r10 = b10 & a0 # asm 1: vpand <b10=reg256#14,<a0=reg256#13,>r10=reg256#6 # asm 2: vpand <b10=%ymm13,<a0=%ymm12,>r10=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b9 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b9=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b9=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b9 & a6 # asm 1: vpand <b9=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b9=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm14,<r15=%ymm7,<r15=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 480 ] = r15 # asm 1: vmovupd <r15=reg256#8,480(<ptr=int64#5) # asm 2: vmovupd <r15=%ymm7,480(<ptr=%r8) vmovupd % ymm7, 480( % r8) # qhasm: r = b9 & a5 # asm 1: vpand <b9=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#8,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm7,<r14=%ymm9,<r14=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b9 & a4 # asm 1: vpand <b9=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#8,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm7,<r13=%ymm11,<r13=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b9 & a3 # asm 1: vpand <b9=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#8,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm7,<r12=%ymm0,<r12=%ymm0 vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b9 & a2 # asm 1: vpand <b9=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#8,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm7,<r11=%ymm3,<r11=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b9 & a1 # asm 1: vpand 
<b9=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#8,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm7,<r10=%ymm5,<r10=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r9 = b9 & a0 # asm 1: vpand <b9=reg256#14,<a0=reg256#13,>r9=reg256#8 # asm 2: vpand <b9=%ymm13,<a0=%ymm12,>r9=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b8 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b8=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b8=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b8 & a6 # asm 1: vpand <b8=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b8=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm14,<r14=%ymm9,<r14=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 448 ] = r14 # asm 1: vmovupd <r14=reg256#10,448(<ptr=int64#5) # asm 2: vmovupd <r14=%ymm9,448(<ptr=%r8) vmovupd % ymm9, 448( % r8) # qhasm: r = b8 & a5 # asm 1: vpand <b8=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#10,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm9,<r13=%ymm11,<r13=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b8 & a4 # asm 1: vpand <b8=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#10,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm9,<r12=%ymm0,<r12=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b8 & a3 # asm 1: vpand <b8=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#10,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm9,<r11=%ymm3,<r11=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b8 & a2 # asm 1: vpand <b8=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#10,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm9,<r10=%ymm5,<r10=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b8 & a1 # asm 1: vpand <b8=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#10,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm9,<r9=%ymm7,<r9=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r8 = b8 & a0 # asm 1: vpand <b8=reg256#14,<a0=reg256#13,>r8=reg256#10 # asm 2: vpand <b8=%ymm13,<a0=%ymm12,>r8=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b7 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b7=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b7=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b7 & a6 # asm 1: vpand <b7=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b7=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm14,<r13=%ymm11,<r13=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 416 ] = r13 # asm 1: vmovupd 
<r13=reg256#12,416(<ptr=int64#5) # asm 2: vmovupd <r13=%ymm11,416(<ptr=%r8) vmovupd % ymm11, 416( % r8) # qhasm: r = b7 & a5 # asm 1: vpand <b7=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#12,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm11,<r12=%ymm0,<r12=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b7 & a4 # asm 1: vpand <b7=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#12,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm11,<r11=%ymm3,<r11=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b7 & a3 # asm 1: vpand <b7=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#12,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm11,<r10=%ymm5,<r10=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b7 & a2 # asm 1: vpand <b7=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#12,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm11,<r9=%ymm7,<r9=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b7 & a1 # asm 1: vpand <b7=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#12,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm11,<r8=%ymm9,<r8=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r7 = b7 & a0 # asm 1: vpand <b7=reg256#14,<a0=reg256#13,>r7=reg256#12 # asm 2: vpand <b7=%ymm13,<a0=%ymm12,>r7=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b6 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b6=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b6=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b6 & a6 # asm 1: vpand <b6=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b6=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm14,<r12=%ymm0,<r12=%ymm0 vpxor % ymm14, % ymm0, % ymm0 # qhasm: mem256[ ptr + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<ptr=int64#5) # asm 2: vmovupd <r12=%ymm0,384(<ptr=%r8) vmovupd % ymm0, 384( % r8) # qhasm: r = b6 & a5 # asm 1: vpand <b6=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm0,<r11=%ymm3,<r11=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b6 & a4 # asm 1: vpand <b6=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm0,<r10=%ymm5,<r10=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b6 & a3 # asm 1: vpand <b6=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a3=%ymm6,>r=%ymm0 vpand % ymm13, % ymm6, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm0,<r9=%ymm7,<r9=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b6 & a2 # asm 1: vpand <b6=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand 
<b6=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm0,<r8=%ymm9,<r8=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b6 & a1 # asm 1: vpand <b6=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm0,<r7=%ymm11,<r7=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r6 = b6 & a0 # asm 1: vpand <b6=reg256#14,<a0=reg256#13,>r6=reg256#1 # asm 2: vpand <b6=%ymm13,<a0=%ymm12,>r6=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: b5 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b5=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b5=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b5 & a6 # asm 1: vpand <b5=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b5=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm14,<r11=%ymm3,<r11=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 352 ] = r11 # asm 1: vmovupd <r11=reg256#4,352(<ptr=int64#5) # asm 2: vmovupd <r11=%ymm3,352(<ptr=%r8) vmovupd % ymm3, 352( % r8) # qhasm: r = b5 & a5 # asm 1: vpand <b5=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#4,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm3,<r10=%ymm5,<r10=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b5 & a4 # asm 1: vpand <b5=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#4,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm3,<r9=%ymm7,<r9=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b5 & a3 # asm 1: vpand <b5=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#4,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm3,<r8=%ymm9,<r8=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b5 & a2 # asm 1: vpand <b5=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#4,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm3,<r7=%ymm11,<r7=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b5 & a1 # asm 1: vpand <b5=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#4,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm3,<r6=%ymm0,<r6=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r5 = b5 & a0 # asm 1: vpand <b5=reg256#14,<a0=reg256#13,>r5=reg256#4 # asm 2: vpand <b5=%ymm13,<a0=%ymm12,>r5=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b4 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b4=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b4=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b4 & a6 # asm 1: vpand <b4=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b4=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: 
r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm14,<r10=%ymm5,<r10=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#6,320(<ptr=int64#5) # asm 2: vmovupd <r10=%ymm5,320(<ptr=%r8) vmovupd % ymm5, 320( % r8) # qhasm: r = b4 & a5 # asm 1: vpand <b4=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#6,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm5,<r9=%ymm7,<r9=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b4 & a4 # asm 1: vpand <b4=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#6,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm5,<r8=%ymm9,<r8=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b4 & a3 # asm 1: vpand <b4=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#6,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm5,<r7=%ymm11,<r7=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b4 & a2 # asm 1: vpand <b4=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#6,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm5,<r6=%ymm0,<r6=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b4 & a1 # asm 1: vpand <b4=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#6,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm5,<r5=%ymm3,<r5=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r4 = b4 & a0 # asm 1: vpand <b4=reg256#14,<a0=reg256#13,>r4=reg256#6 # asm 2: vpand <b4=%ymm13,<a0=%ymm12,>r4=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b3 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b3=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b3=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b3 & a6 # asm 1: vpand <b3=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b3=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm14,<r9=%ymm7,<r9=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#8,288(<ptr=int64#5) # asm 2: vmovupd <r9=%ymm7,288(<ptr=%r8) vmovupd % ymm7, 288( % r8) # qhasm: r = b3 & a5 # asm 1: vpand <b3=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#8,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm7,<r8=%ymm9,<r8=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b3 & a4 # asm 1: vpand <b3=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#8,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm7,<r7=%ymm11,<r7=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b3 & a3 # asm 1: vpand <b3=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#8,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm7,<r6=%ymm0,<r6=%ymm0 
vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b3 & a2 # asm 1: vpand <b3=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#8,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm7,<r5=%ymm3,<r5=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b3 & a1 # asm 1: vpand <b3=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#8,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm7,<r4=%ymm5,<r4=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r3 = b3 & a0 # asm 1: vpand <b3=reg256#14,<a0=reg256#13,>r3=reg256#8 # asm 2: vpand <b3=%ymm13,<a0=%ymm12,>r3=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b2 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b2=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b2=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b2 & a6 # asm 1: vpand <b2=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b2=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm14,<r8=%ymm9,<r8=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#10,256(<ptr=int64#5) # asm 2: vmovupd <r8=%ymm9,256(<ptr=%r8) vmovupd % ymm9, 256( % r8) # qhasm: r = b2 & a5 # asm 1: vpand <b2=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#10,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm9,<r7=%ymm11,<r7=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b2 & a4 # asm 1: vpand <b2=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#10,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm9,<r6=%ymm0,<r6=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b2 & a3 # asm 1: vpand <b2=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#10,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm9,<r5=%ymm3,<r5=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b2 & a2 # asm 1: vpand <b2=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#10,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm9,<r4=%ymm5,<r4=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b2 & a1 # asm 1: vpand <b2=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#10,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm9,<r3=%ymm7,<r3=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r2 = b2 & a0 # asm 1: vpand <b2=reg256#14,<a0=reg256#13,>r2=reg256#10 # asm 2: vpand <b2=%ymm13,<a0=%ymm12,>r2=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b1 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b1=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b1=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b1 & a6 # asm 1: vpand 
<b1=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b1=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm14,<r7=%ymm11,<r7=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#12,224(<ptr=int64#5) # asm 2: vmovupd <r7=%ymm11,224(<ptr=%r8) vmovupd % ymm11, 224( % r8) # qhasm: r = b1 & a5 # asm 1: vpand <b1=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#12,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm11,<r6=%ymm0,<r6=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b1 & a4 # asm 1: vpand <b1=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#12,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm11,<r5=%ymm3,<r5=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b1 & a3 # asm 1: vpand <b1=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#12,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm11,<r4=%ymm5,<r4=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b1 & a2 # asm 1: vpand <b1=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#12,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm11,<r3=%ymm7,<r3=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b1 & a1 # asm 1: vpand <b1=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#12,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm11,<r2=%ymm9,<r2=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r1 = b1 & a0 # asm 1: vpand <b1=reg256#14,<a0=reg256#13,>r1=reg256#12 # asm 2: vpand <b1=%ymm13,<a0=%ymm12,>r1=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b0 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b0=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b0=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b0 & a6 # asm 1: vpand <b0=reg256#14,<a6=reg256#2,>r=reg256#2 # asm 2: vpand <b0=%ymm13,<a6=%ymm1,>r=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#2,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm1,<r6=%ymm0,<r6=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<ptr=int64#5) # asm 2: vmovupd <r6=%ymm0,192(<ptr=%r8) vmovupd % ymm0, 192( % r8) # qhasm: r = b0 & a5 # asm 1: vpand <b0=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm0,<r5=%ymm3,<r5=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b0 & a4 # asm 1: vpand <b0=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm0,<r4=%ymm5,<r4=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b0 & a3 # asm 1: vpand <b0=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a3=%ymm6,>r=%ymm0 vpand 
% ymm13, % ymm6, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm0,<r3=%ymm7,<r3=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b0 & a2 # asm 1: vpand <b0=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm0,<r2=%ymm9,<r2=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b0 & a1 # asm 1: vpand <b0=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#12,<r1=reg256#12 # asm 2: vpxor <r=%ymm0,<r1=%ymm11,<r1=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r0 = b0 & a0 # asm 1: vpand <b0=reg256#14,<a0=reg256#13,>r0=reg256#1 # asm 2: vpand <b0=%ymm13,<a0=%ymm12,>r0=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#5) # asm 2: vmovupd <r5=%ymm3,160(<ptr=%r8) vmovupd % ymm3, 160( % r8) # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#6,128(<ptr=int64#5) # asm 2: vmovupd <r4=%ymm5,128(<ptr=%r8) vmovupd % ymm5, 128( % r8) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#8,96(<ptr=int64#5) # asm 2: vmovupd <r3=%ymm7,96(<ptr=%r8) vmovupd % ymm7, 96( % r8) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#10,64(<ptr=int64#5) # asm 2: vmovupd <r2=%ymm9,64(<ptr=%r8) vmovupd % ymm9, 64( % r8) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#12,32(<ptr=int64#5) # asm 2: vmovupd <r1=%ymm11,32(<ptr=%r8) vmovupd % ymm11, 32( % r8) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#5) # asm 2: vmovupd <r0=%ymm0,0(<ptr=%r8) vmovupd % ymm0, 0( % r8) # qhasm: vzeroupper vzeroupper # qhasm: h24 = mem128[ ptr + 560 ] # asm 1: movdqu 560(<ptr=int64#5),>h24=reg128#1 # asm 2: movdqu 560(<ptr=%r8),>h24=%xmm0 movdqu 560( % r8), % xmm0 # qhasm: h11 = h24 # asm 1: movdqa <h24=reg128#1,>h11=reg128#2 # asm 2: movdqa <h24=%xmm0,>h11=%xmm1 movdqa % xmm0, % xmm1 # qhasm: h12 = h24 # asm 1: movdqa <h24=reg128#1,>h12=reg128#3 # asm 2: movdqa <h24=%xmm0,>h12=%xmm2 movdqa % xmm0, % xmm2 # qhasm: h14 = h24 # asm 1: movdqa <h24=reg128#1,>h14=reg128#4 # asm 2: movdqa <h24=%xmm0,>h14=%xmm3 movdqa % xmm0, % xmm3 # qhasm: h15 = h24 # asm 1: movdqa <h24=reg128#1,>h15=reg128#1 # asm 2: movdqa <h24=%xmm0,>h15=%xmm0 movdqa % xmm0, % xmm0 # qhasm: h23 = mem128[ ptr + 528 ] # asm 1: movdqu 528(<ptr=int64#5),>h23=reg128#5 # asm 2: movdqu 528(<ptr=%r8),>h23=%xmm4 movdqu 528( % r8), % xmm4 # qhasm: h10 = h23 # asm 1: movdqa <h23=reg128#5,>h10=reg128#6 # asm 2: movdqa <h23=%xmm4,>h10=%xmm5 movdqa % xmm4, % xmm5 # qhasm: h11 = h11 ^ h23 # asm 1: vpxor <h23=reg128#5,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h23=%xmm4,<h11=%xmm1,>h11=%xmm1 vpxor % xmm4, % xmm1, % xmm1 # qhasm: h13 = h23 # asm 1: movdqa <h23=reg128#5,>h13=reg128#7 # asm 2: movdqa <h23=%xmm4,>h13=%xmm6 movdqa % xmm4, % xmm6 # qhasm: h14 = h14 ^ h23 # asm 1: vpxor <h23=reg128#5,<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor <h23=%xmm4,<h14=%xmm3,>h14=%xmm3 vpxor % xmm4, % xmm3, % xmm3 # qhasm: h22 = mem128[ ptr + 496 ] # asm 1: movdqu 496(<ptr=int64#5),>h22=reg128#5 # asm 2: movdqu 496(<ptr=%r8),>h22=%xmm4 movdqu 496( % r8), % xmm4 # qhasm: h9 = h22 # asm 1: movdqa <h22=reg128#5,>h9=reg128#8 # asm 2: movdqa <h22=%xmm4,>h9=%xmm7 movdqa % xmm4, % xmm7 # qhasm: h10 = h10 ^ h22 # asm 1: vpxor <h22=reg128#5,<h10=reg128#6,>h10=reg128#6 # asm 2: 
vpxor <h22=%xmm4,<h10=%xmm5,>h10=%xmm5 vpxor % xmm4, % xmm5, % xmm5 # qhasm: h12 = h12 ^ h22 # asm 1: vpxor <h22=reg128#5,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h22=%xmm4,<h12=%xmm2,>h12=%xmm2 vpxor % xmm4, % xmm2, % xmm2 # qhasm: h13 = h13 ^ h22 # asm 1: vpxor <h22=reg128#5,<h13=reg128#7,>h13=reg128#5 # asm 2: vpxor <h22=%xmm4,<h13=%xmm6,>h13=%xmm4 vpxor % xmm4, % xmm6, % xmm4 # qhasm: h21 = mem128[ ptr + 464 ] # asm 1: movdqu 464(<ptr=int64#5),>h21=reg128#7 # asm 2: movdqu 464(<ptr=%r8),>h21=%xmm6 movdqu 464( % r8), % xmm6 # qhasm: h8 = h21 # asm 1: movdqa <h21=reg128#7,>h8=reg128#9 # asm 2: movdqa <h21=%xmm6,>h8=%xmm8 movdqa % xmm6, % xmm8 # qhasm: h9 = h9 ^ h21 # asm 1: vpxor <h21=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h21=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h11 = h11 ^ h21 # asm 1: vpxor <h21=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h21=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h12 = h12 ^ h21 # asm 1: vpxor <h21=reg128#7,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h21=%xmm6,<h12=%xmm2,>h12=%xmm2 vpxor % xmm6, % xmm2, % xmm2 # qhasm: h20 = mem128[ ptr + 432 ] # asm 1: movdqu 432(<ptr=int64#5),>h20=reg128#7 # asm 2: movdqu 432(<ptr=%r8),>h20=%xmm6 movdqu 432( % r8), % xmm6 # qhasm: h7 = h20 # asm 1: movdqa <h20=reg128#7,>h7=reg128#10 # asm 2: movdqa <h20=%xmm6,>h7=%xmm9 movdqa % xmm6, % xmm9 # qhasm: h8 = h8 ^ h20 # asm 1: vpxor <h20=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h20=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h10 = h10 ^ h20 # asm 1: vpxor <h20=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h20=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h11 = h11 ^ h20 # asm 1: vpxor <h20=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h20=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h19 = mem128[ ptr + 400 ] # asm 1: movdqu 400(<ptr=int64#5),>h19=reg128#7 # asm 2: movdqu 400(<ptr=%r8),>h19=%xmm6 movdqu 400( % r8), % xmm6 # qhasm: h6 = h19 # asm 1: movdqa <h19=reg128#7,>h6=reg128#11 # asm 2: movdqa <h19=%xmm6,>h6=%xmm10 movdqa % xmm6, % xmm10 # qhasm: h7 = h7 ^ h19 # asm 1: vpxor <h19=reg128#7,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h19=%xmm6,<h7=%xmm9,>h7=%xmm9 vpxor % xmm6, % xmm9, % xmm9 # qhasm: h9 = h9 ^ h19 # asm 1: vpxor <h19=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h19=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h10 = h10 ^ h19 # asm 1: vpxor <h19=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h19=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h18 = mem128[ ptr + 368 ] # asm 1: movdqu 368(<ptr=int64#5),>h18=reg128#7 # asm 2: movdqu 368(<ptr=%r8),>h18=%xmm6 movdqu 368( % r8), % xmm6 # qhasm: h18 = h18 ^ mem128[ ptr + 576 ] # asm 1: vpxor 576(<ptr=int64#5),<h18=reg128#7,>h18=reg128#7 # asm 2: vpxor 576(<ptr=%r8),<h18=%xmm6,>h18=%xmm6 vpxor 576( % r8), % xmm6, % xmm6 # qhasm: h5 = h18 # asm 1: movdqa <h18=reg128#7,>h5=reg128#12 # asm 2: movdqa <h18=%xmm6,>h5=%xmm11 movdqa % xmm6, % xmm11 # qhasm: h6 = h6 ^ h18 # asm 1: vpxor <h18=reg128#7,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h18=%xmm6,<h6=%xmm10,>h6=%xmm10 vpxor % xmm6, % xmm10, % xmm10 # qhasm: h8 = h8 ^ h18 # asm 1: vpxor <h18=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h18=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h9 = h9 ^ h18 # asm 1: vpxor <h18=reg128#7,<h9=reg128#8,>h9=reg128#7 # asm 2: vpxor <h18=%xmm6,<h9=%xmm7,>h9=%xmm6 vpxor % xmm6, % xmm7, % xmm6 # qhasm: h17 = 
mem128[ ptr + 336 ] # asm 1: movdqu 336(<ptr=int64#5),>h17=reg128#8 # asm 2: movdqu 336(<ptr=%r8),>h17=%xmm7 movdqu 336( % r8), % xmm7 # qhasm: h17 = h17 ^ mem128[ ptr + 544 ] # asm 1: vpxor 544(<ptr=int64#5),<h17=reg128#8,>h17=reg128#8 # asm 2: vpxor 544(<ptr=%r8),<h17=%xmm7,>h17=%xmm7 vpxor 544( % r8), % xmm7, % xmm7 # qhasm: h4 = h17 # asm 1: movdqa <h17=reg128#8,>h4=reg128#13 # asm 2: movdqa <h17=%xmm7,>h4=%xmm12 movdqa % xmm7, % xmm12 # qhasm: h5 = h5 ^ h17 # asm 1: vpxor <h17=reg128#8,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h17=%xmm7,<h5=%xmm11,>h5=%xmm11 vpxor % xmm7, % xmm11, % xmm11 # qhasm: h7 = h7 ^ h17 # asm 1: vpxor <h17=reg128#8,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h17=%xmm7,<h7=%xmm9,>h7=%xmm9 vpxor % xmm7, % xmm9, % xmm9 # qhasm: h8 = h8 ^ h17 # asm 1: vpxor <h17=reg128#8,<h8=reg128#9,>h8=reg128#8 # asm 2: vpxor <h17=%xmm7,<h8=%xmm8,>h8=%xmm7 vpxor % xmm7, % xmm8, % xmm7 # qhasm: h16 = mem128[ ptr + 304 ] # asm 1: movdqu 304(<ptr=int64#5),>h16=reg128#9 # asm 2: movdqu 304(<ptr=%r8),>h16=%xmm8 movdqu 304( % r8), % xmm8 # qhasm: h16 = h16 ^ mem128[ ptr + 512 ] # asm 1: vpxor 512(<ptr=int64#5),<h16=reg128#9,>h16=reg128#9 # asm 2: vpxor 512(<ptr=%r8),<h16=%xmm8,>h16=%xmm8 vpxor 512( % r8), % xmm8, % xmm8 # qhasm: h3 = h16 # asm 1: movdqa <h16=reg128#9,>h3=reg128#14 # asm 2: movdqa <h16=%xmm8,>h3=%xmm13 movdqa % xmm8, % xmm13 # qhasm: h4 = h4 ^ h16 # asm 1: vpxor <h16=reg128#9,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor <h16=%xmm8,<h4=%xmm12,>h4=%xmm12 vpxor % xmm8, % xmm12, % xmm12 # qhasm: h6 = h6 ^ h16 # asm 1: vpxor <h16=reg128#9,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h16=%xmm8,<h6=%xmm10,>h6=%xmm10 vpxor % xmm8, % xmm10, % xmm10 # qhasm: h7 = h7 ^ h16 # asm 1: vpxor <h16=reg128#9,<h7=reg128#10,>h7=reg128#9 # asm 2: vpxor <h16=%xmm8,<h7=%xmm9,>h7=%xmm8 vpxor % xmm8, % xmm9, % xmm8 # qhasm: h15 = h15 ^ mem128[ ptr + 272 ] # asm 1: vpxor 272(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 272(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 272( % r8), % xmm0, % xmm0 # qhasm: h15 = h15 ^ mem128[ ptr + 480 ] # asm 1: vpxor 480(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 480(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 480( % r8), % xmm0, % xmm0 # qhasm: h2 = h15 # asm 1: movdqa <h15=reg128#1,>h2=reg128#10 # asm 2: movdqa <h15=%xmm0,>h2=%xmm9 movdqa % xmm0, % xmm9 # qhasm: h3 = h3 ^ h15 # asm 1: vpxor <h15=reg128#1,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h15=%xmm0,<h3=%xmm13,>h3=%xmm13 vpxor % xmm0, % xmm13, % xmm13 # qhasm: h5 = h5 ^ h15 # asm 1: vpxor <h15=reg128#1,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h15=%xmm0,<h5=%xmm11,>h5=%xmm11 vpxor % xmm0, % xmm11, % xmm11 # qhasm: h6 = h6 ^ h15 # asm 1: vpxor <h15=reg128#1,<h6=reg128#11,>h6=reg128#1 # asm 2: vpxor <h15=%xmm0,<h6=%xmm10,>h6=%xmm0 vpxor % xmm0, % xmm10, % xmm0 # qhasm: h14 = h14 ^ mem128[ ptr + 240 ] # asm 1: vpxor 240(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 240(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 240( % r8), % xmm3, % xmm3 # qhasm: h14 = h14 ^ mem128[ ptr + 448 ] # asm 1: vpxor 448(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 448(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 448( % r8), % xmm3, % xmm3 # qhasm: h1 = h14 # asm 1: movdqa <h14=reg128#4,>h1=reg128#11 # asm 2: movdqa <h14=%xmm3,>h1=%xmm10 movdqa % xmm3, % xmm10 # qhasm: h2 = h2 ^ h14 # asm 1: vpxor <h14=reg128#4,<h2=reg128#10,>h2=reg128#10 # asm 2: vpxor <h14=%xmm3,<h2=%xmm9,>h2=%xmm9 vpxor % xmm3, % xmm9, % xmm9 # qhasm: h4 = h4 ^ h14 # asm 1: vpxor <h14=reg128#4,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor 
<h14=%xmm3,<h4=%xmm12,>h4=%xmm12 vpxor % xmm3, % xmm12, % xmm12 # qhasm: h5 = h5 ^ h14 # asm 1: vpxor <h14=reg128#4,<h5=reg128#12,>h5=reg128#4 # asm 2: vpxor <h14=%xmm3,<h5=%xmm11,>h5=%xmm3 vpxor % xmm3, % xmm11, % xmm3 # qhasm: h13 = h13 ^ mem128[ ptr + 208 ] # asm 1: vpxor 208(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 208(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 208( % r8), % xmm4, % xmm4 # qhasm: h13 = h13 ^ mem128[ ptr + 416 ] # asm 1: vpxor 416(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 416(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 416( % r8), % xmm4, % xmm4 # qhasm: h0 = h13 # asm 1: movdqa <h13=reg128#5,>h0=reg128#12 # asm 2: movdqa <h13=%xmm4,>h0=%xmm11 movdqa % xmm4, % xmm11 # qhasm: h1 = h1 ^ h13 # asm 1: vpxor <h13=reg128#5,<h1=reg128#11,>h1=reg128#11 # asm 2: vpxor <h13=%xmm4,<h1=%xmm10,>h1=%xmm10 vpxor % xmm4, % xmm10, % xmm10 # qhasm: h3 = h3 ^ h13 # asm 1: vpxor <h13=reg128#5,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h13=%xmm4,<h3=%xmm13,>h3=%xmm13 vpxor % xmm4, % xmm13, % xmm13 # qhasm: h4 = h4 ^ h13 # asm 1: vpxor <h13=reg128#5,<h4=reg128#13,>h4=reg128#5 # asm 2: vpxor <h13=%xmm4,<h4=%xmm12,>h4=%xmm4 vpxor % xmm4, % xmm12, % xmm4 # qhasm: h12 = h12 ^ mem128[ ptr + 384 ] # asm 1: vpxor 384(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 384(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 384( % r8), % xmm2, % xmm2 # qhasm: h12 = h12 ^ mem128[ ptr + 176 ] # asm 1: vpxor 176(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 176(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 176( % r8), % xmm2, % xmm2 # qhasm: mem128[ input_0 + 192 ] = h12 # asm 1: movdqu <h12=reg128#3,192(<input_0=int64#1) # asm 2: movdqu <h12=%xmm2,192(<input_0=%rdi) movdqu % xmm2, 192( % rdi) # qhasm: h11 = h11 ^ mem128[ ptr + 352 ] # asm 1: vpxor 352(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 352(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 352( % r8), % xmm1, % xmm1 # qhasm: h11 = h11 ^ mem128[ ptr + 144 ] # asm 1: vpxor 144(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 144(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 144( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 176 ] = h11 # asm 1: movdqu <h11=reg128#2,176(<input_0=int64#1) # asm 2: movdqu <h11=%xmm1,176(<input_0=%rdi) movdqu % xmm1, 176( % rdi) # qhasm: h10 = h10 ^ mem128[ ptr + 320 ] # asm 1: vpxor 320(<ptr=int64#5),<h10=reg128#6,>h10=reg128#2 # asm 2: vpxor 320(<ptr=%r8),<h10=%xmm5,>h10=%xmm1 vpxor 320( % r8), % xmm5, % xmm1 # qhasm: h10 = h10 ^ mem128[ ptr + 112 ] # asm 1: vpxor 112(<ptr=int64#5),<h10=reg128#2,>h10=reg128#2 # asm 2: vpxor 112(<ptr=%r8),<h10=%xmm1,>h10=%xmm1 vpxor 112( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 160 ] = h10 # asm 1: movdqu <h10=reg128#2,160(<input_0=int64#1) # asm 2: movdqu <h10=%xmm1,160(<input_0=%rdi) movdqu % xmm1, 160( % rdi) # qhasm: h9 = h9 ^ mem128[ ptr + 288 ] # asm 1: vpxor 288(<ptr=int64#5),<h9=reg128#7,>h9=reg128#2 # asm 2: vpxor 288(<ptr=%r8),<h9=%xmm6,>h9=%xmm1 vpxor 288( % r8), % xmm6, % xmm1 # qhasm: h9 = h9 ^ mem128[ ptr + 80 ] # asm 1: vpxor 80(<ptr=int64#5),<h9=reg128#2,>h9=reg128#2 # asm 2: vpxor 80(<ptr=%r8),<h9=%xmm1,>h9=%xmm1 vpxor 80( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 144 ] = h9 # asm 1: movdqu <h9=reg128#2,144(<input_0=int64#1) # asm 2: movdqu <h9=%xmm1,144(<input_0=%rdi) movdqu % xmm1, 144( % rdi) # qhasm: h8 = h8 ^ mem128[ ptr + 256 ] # asm 1: vpxor 256(<ptr=int64#5),<h8=reg128#8,>h8=reg128#2 # asm 2: vpxor 256(<ptr=%r8),<h8=%xmm7,>h8=%xmm1 vpxor 256( % r8), % xmm7, % xmm1 # qhasm: h8 = h8 ^ mem128[ ptr + 48 ] # asm 1: vpxor 
48(<ptr=int64#5),<h8=reg128#2,>h8=reg128#2 # asm 2: vpxor 48(<ptr=%r8),<h8=%xmm1,>h8=%xmm1 vpxor 48( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 128 ] = h8 # asm 1: movdqu <h8=reg128#2,128(<input_0=int64#1) # asm 2: movdqu <h8=%xmm1,128(<input_0=%rdi) movdqu % xmm1, 128( % rdi) # qhasm: h7 = h7 ^ mem128[ ptr + 224 ] # asm 1: vpxor 224(<ptr=int64#5),<h7=reg128#9,>h7=reg128#2 # asm 2: vpxor 224(<ptr=%r8),<h7=%xmm8,>h7=%xmm1 vpxor 224( % r8), % xmm8, % xmm1 # qhasm: h7 = h7 ^ mem128[ ptr + 16 ] # asm 1: vpxor 16(<ptr=int64#5),<h7=reg128#2,>h7=reg128#2 # asm 2: vpxor 16(<ptr=%r8),<h7=%xmm1,>h7=%xmm1 vpxor 16( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 112 ] = h7 # asm 1: movdqu <h7=reg128#2,112(<input_0=int64#1) # asm 2: movdqu <h7=%xmm1,112(<input_0=%rdi) movdqu % xmm1, 112( % rdi) # qhasm: h6 = h6 ^ mem128[ ptr + 192 ] # asm 1: vpxor 192(<ptr=int64#5),<h6=reg128#1,>h6=reg128#1 # asm 2: vpxor 192(<ptr=%r8),<h6=%xmm0,>h6=%xmm0 vpxor 192( % r8), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 96 ] = h6 # asm 1: movdqu <h6=reg128#1,96(<input_0=int64#1) # asm 2: movdqu <h6=%xmm0,96(<input_0=%rdi) movdqu % xmm0, 96( % rdi) # qhasm: h5 = h5 ^ mem128[ ptr + 160 ] # asm 1: vpxor 160(<ptr=int64#5),<h5=reg128#4,>h5=reg128#1 # asm 2: vpxor 160(<ptr=%r8),<h5=%xmm3,>h5=%xmm0 vpxor 160( % r8), % xmm3, % xmm0 # qhasm: mem128[ input_0 + 80 ] = h5 # asm 1: movdqu <h5=reg128#1,80(<input_0=int64#1) # asm 2: movdqu <h5=%xmm0,80(<input_0=%rdi) movdqu % xmm0, 80( % rdi) # qhasm: h4 = h4 ^ mem128[ ptr + 128 ] # asm 1: vpxor 128(<ptr=int64#5),<h4=reg128#5,>h4=reg128#1 # asm 2: vpxor 128(<ptr=%r8),<h4=%xmm4,>h4=%xmm0 vpxor 128( % r8), % xmm4, % xmm0 # qhasm: mem128[ input_0 + 64 ] = h4 # asm 1: movdqu <h4=reg128#1,64(<input_0=int64#1) # asm 2: movdqu <h4=%xmm0,64(<input_0=%rdi) movdqu % xmm0, 64( % rdi) # qhasm: h3 = h3 ^ mem128[ ptr + 96 ] # asm 1: vpxor 96(<ptr=int64#5),<h3=reg128#14,>h3=reg128#1 # asm 2: vpxor 96(<ptr=%r8),<h3=%xmm13,>h3=%xmm0 vpxor 96( % r8), % xmm13, % xmm0 # qhasm: mem128[ input_0 + 48 ] = h3 # asm 1: movdqu <h3=reg128#1,48(<input_0=int64#1) # asm 2: movdqu <h3=%xmm0,48(<input_0=%rdi) movdqu % xmm0, 48( % rdi) # qhasm: h2 = h2 ^ mem128[ ptr + 64 ] # asm 1: vpxor 64(<ptr=int64#5),<h2=reg128#10,>h2=reg128#1 # asm 2: vpxor 64(<ptr=%r8),<h2=%xmm9,>h2=%xmm0 vpxor 64( % r8), % xmm9, % xmm0 # qhasm: mem128[ input_0 + 32 ] = h2 # asm 1: movdqu <h2=reg128#1,32(<input_0=int64#1) # asm 2: movdqu <h2=%xmm0,32(<input_0=%rdi) movdqu % xmm0, 32( % rdi) # qhasm: h1 = h1 ^ mem128[ ptr + 32 ] # asm 1: vpxor 32(<ptr=int64#5),<h1=reg128#11,>h1=reg128#1 # asm 2: vpxor 32(<ptr=%r8),<h1=%xmm10,>h1=%xmm0 vpxor 32( % r8), % xmm10, % xmm0 # qhasm: mem128[ input_0 + 16 ] = h1 # asm 1: movdqu <h1=reg128#1,16(<input_0=int64#1) # asm 2: movdqu <h1=%xmm0,16(<input_0=%rdi) movdqu % xmm0, 16( % rdi) # qhasm: h0 = h0 ^ mem128[ ptr + 0 ] # asm 1: vpxor 0(<ptr=int64#5),<h0=reg128#12,>h0=reg128#1 # asm 2: vpxor 0(<ptr=%r8),<h0=%xmm11,>h0=%xmm0 vpxor 0( % r8), % xmm11, % xmm0 # qhasm: mem128[ input_0 + 0 ] = h0 # asm 1: movdqu <h0=reg128#1,0(<input_0=int64#1) # asm 2: movdqu <h0=%xmm0,0(<input_0=%rdi) movdqu % xmm0, 0( % rdi) # qhasm: return add % r11, % rsp ret
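The file ending at the `ret` above is qhasm-generated AVX2 code for a bitsliced multiply in a binary field: each 256-bit ymm register holds one bit-plane of the operands, the vpand/vpxor ladder over b0..b4 (and the limbs before this excerpt) forms the carry-less schoolbook product h0..h24 in a scratch buffer, and the tail loads h24 down to h13 and folds them back into h12..h0 modulo the field polynomial before storing the 13 result planes to input_0. A minimal plain-C sketch of the multiply step on 64-bit bit-planes follows; the field degree GF2E_BITS and the array shapes are illustrative assumptions, not values read from this file.

#include <stdint.h>

#define GF2E_BITS 13 /* assumed field degree, for illustration only */

/* h[0 .. 2*GF2E_BITS-2] = a * b as polynomials over GF(2),
   one bit-plane per 64-bit word: AND is a bit multiply, XOR a bit add,
   mirroring the vpand/vpxor ladder in the assembly above. */
static void bitsliced_mul(uint64_t h[2 * GF2E_BITS - 1],
                          const uint64_t a[GF2E_BITS],
                          const uint64_t b[GF2E_BITS]) {
    for (int k = 0; k < 2 * GF2E_BITS - 1; k++) {
        h[k] = 0;
    }
    for (int i = 0; i < GF2E_BITS; i++) {
        for (int j = 0; j < GF2E_BITS; j++) {
            h[i + j] ^= a[i] & b[j];
        }
    }
    /* The reduction of h[GF2E_BITS ..] back into the low limbs depends on
       the field polynomial's taps, which this sketch does not assume. */
}

Bitslicing turns the field multiply into pure AND/XOR with no table lookups or data-dependent branches, which is why this style of code is constant-time by construction.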
mktmansour/MKT-KSA-Geolocation-Security
11,545
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896/avx2/vec_reduce_asm.S
#include "namespace.h" #define vec_reduce_asm CRYPTO_NAMESPACE(vec_reduce_asm) #define _vec_reduce_asm _CRYPTO_NAMESPACE(vec_reduce_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 t0 # qhasm: int64 t1 # qhasm: int64 c # qhasm: int64 r # qhasm: enter vec_reduce_asm .p2align 5 .global _vec_reduce_asm .global vec_reduce_asm _vec_reduce_asm: vec_reduce_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: r = 0 # asm 1: mov $0,>r=int64#7 # asm 2: mov $0,>r=%rax mov $0, % rax # qhasm: t0 = mem64[ input_0 + 192 ] # asm 1: movq 192(<input_0=int64#1),>t0=int64#2 # asm 2: movq 192(<input_0=%rdi),>t0=%rsi movq 192( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 200 ] # asm 1: movq 200(<input_0=int64#1),>t1=int64#3 # asm 2: movq 200(<input_0=%rdi),>t1=%rdx movq 200( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 176 ] # asm 1: movq 176(<input_0=int64#1),>t0=int64#2 # asm 2: movq 176(<input_0=%rdi),>t0=%rsi movq 176( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 184 ] # asm 1: movq 184(<input_0=int64#1),>t1=int64#3 # asm 2: movq 184(<input_0=%rdi),>t1=%rdx movq 184( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 160 ] # asm 1: movq 160(<input_0=int64#1),>t0=int64#2 # asm 2: movq 160(<input_0=%rdi),>t0=%rsi movq 160( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 168 ] # asm 1: movq 168(<input_0=int64#1),>t1=int64#3 # asm 2: movq 168(<input_0=%rdi),>t1=%rdx movq 168( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 144 ] # asm 1: movq 144(<input_0=int64#1),>t0=int64#2 # asm 2: movq 144(<input_0=%rdi),>t0=%rsi movq 144( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 152 ] # asm 1: movq 152(<input_0=int64#1),>t1=int64#3 # asm 2: movq 152(<input_0=%rdi),>t1=%rdx movq 152( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor 
<t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 128 ] # asm 1: movq 128(<input_0=int64#1),>t0=int64#2 # asm 2: movq 128(<input_0=%rdi),>t0=%rsi movq 128( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 136 ] # asm 1: movq 136(<input_0=int64#1),>t1=int64#3 # asm 2: movq 136(<input_0=%rdi),>t1=%rdx movq 136( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 112 ] # asm 1: movq 112(<input_0=int64#1),>t0=int64#2 # asm 2: movq 112(<input_0=%rdi),>t0=%rsi movq 112( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 120 ] # asm 1: movq 120(<input_0=int64#1),>t1=int64#3 # asm 2: movq 120(<input_0=%rdi),>t1=%rdx movq 120( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 96 ] # asm 1: movq 96(<input_0=int64#1),>t0=int64#2 # asm 2: movq 96(<input_0=%rdi),>t0=%rsi movq 96( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 104 ] # asm 1: movq 104(<input_0=int64#1),>t1=int64#3 # asm 2: movq 104(<input_0=%rdi),>t1=%rdx movq 104( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 80 ] # asm 1: movq 80(<input_0=int64#1),>t0=int64#2 # asm 2: movq 80(<input_0=%rdi),>t0=%rsi movq 80( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 88 ] # asm 1: movq 88(<input_0=int64#1),>t1=int64#3 # asm 2: movq 88(<input_0=%rdi),>t1=%rdx movq 88( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or 
<c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 64 ] # asm 1: movq 64(<input_0=int64#1),>t0=int64#2 # asm 2: movq 64(<input_0=%rdi),>t0=%rsi movq 64( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 72 ] # asm 1: movq 72(<input_0=int64#1),>t1=int64#3 # asm 2: movq 72(<input_0=%rdi),>t1=%rdx movq 72( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 48 ] # asm 1: movq 48(<input_0=int64#1),>t0=int64#2 # asm 2: movq 48(<input_0=%rdi),>t0=%rsi movq 48( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 56 ] # asm 1: movq 56(<input_0=int64#1),>t1=int64#3 # asm 2: movq 56(<input_0=%rdi),>t1=%rdx movq 56( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 32 ] # asm 1: movq 32(<input_0=int64#1),>t0=int64#2 # asm 2: movq 32(<input_0=%rdi),>t0=%rsi movq 32( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 40 ] # asm 1: movq 40(<input_0=int64#1),>t1=int64#3 # asm 2: movq 40(<input_0=%rdi),>t1=%rdx movq 40( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 16 ] # asm 1: movq 16(<input_0=int64#1),>t0=int64#2 # asm 2: movq 16(<input_0=%rdi),>t0=%rsi movq 16( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 24 ] # asm 1: movq 24(<input_0=int64#1),>t1=int64#3 # asm 2: movq 24(<input_0=%rdi),>t1=%rdx movq 24( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>t0=int64#2 # asm 2: movq 0(<input_0=%rdi),>t0=%rsi movq 0( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>t1=int64#1 # asm 2: movq 8(<input_0=%rdi),>t1=%rdi movq 8( % rdi), % rdi # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#1,<t0=int64#2 # asm 2: xor 
<t1=%rdi,<t0=%rsi xor % rdi, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#1 # asm 2: popcnt <t0=%rsi, >c=%rdi popcnt % rsi, % rdi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#1d # asm 2: and $1,<c=%edi and $1, % edi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#1,<r=int64#7 # asm 2: or <c=%rdi,<r=%rax or % rdi, % rax # qhasm: return r add % r11, % rsp ret
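vec_reduce_asm above collapses each 128-bit bit-plane of a bitsliced field element into a single parity bit: for thirteen pairs of 64-bit words (byte offsets 192 down to 0, stepping by 16) it XORs the pair, takes popcnt & 1, and shifts the bit into the accumulator r, so the offset-0 lane ends up in the least significant bit. A plain-C sketch of the same reduction, using the GCC/Clang builtin __builtin_popcountll; LANES is read off the thirteen iterations above.

#include <stdint.h>

#define LANES 13 /* matches the 13 load pairs in the assembly above */

/* For each 128-bit lane (two 64-bit words), compute the parity of the
   whole lane and pack the bits, highest-offset lane first, exactly as
   the shl/or chain above does. */
static uint64_t vec_reduce(const uint64_t in[2 * LANES]) {
    uint64_t r = 0;
    for (int i = LANES - 1; i >= 0; i--) {
        uint64_t t = in[2 * i] ^ in[2 * i + 1];
        r = (r << 1) | (uint64_t)(__builtin_popcountll(t) & 1);
    }
    return r;
}

This is the step that converts a bitsliced product back into an ordinary packed field element, one parity per coefficient.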
mktmansour/MKT-KSA-Geolocation-Security
17,918
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896/avx2/syndrome_asm.S
#include "namespace.h" #define syndrome_asm CRYPTO_NAMESPACE(syndrome_asm) #define _syndrome_asm _CRYPTO_NAMESPACE(syndrome_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 b64 # qhasm: int64 synd # qhasm: int64 addr # qhasm: int64 c # qhasm: int64 c_all # qhasm: int64 row # qhasm: int64 p # qhasm: int64 e # qhasm: int64 s # qhasm: reg256 pp # qhasm: reg256 ee # qhasm: reg256 ss # qhasm: int64 buf_ptr # qhasm: stack256 buf # qhasm: enter syndrome_asm .p2align 5 .global _syndrome_asm .global syndrome_asm _syndrome_asm: syndrome_asm: mov % rsp, % r11 and $31, % r11 add $32, % r11 sub % r11, % rsp # qhasm: input_1 += 523740 # asm 1: add $523740,<input_1=int64#2 # asm 2: add $523740,<input_1=%rsi add $523740, % rsi # qhasm: buf_ptr = &buf # asm 1: leaq <buf=stack256#1,>buf_ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>buf_ptr=%rcx leaq 0( % rsp), % rcx # qhasm: row = 1248 # asm 1: mov $1248,>row=int64#5 # asm 2: mov $1248,>row=%r8 mov $1248, % r8 # qhasm: loop: ._loop: # qhasm: row -= 1 # asm 1: sub $1,<row=int64#5 # asm 2: sub $1,<row=%r8 sub $1, % r8 # qhasm: ss = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>ss=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>ss=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: ee = mem256[ input_2 + 156 ] # asm 1: vmovupd 156(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 156(<input_2=%rdx),>ee=%ymm1 vmovupd 156( % rdx), % ymm1 # qhasm: ss &= ee # asm 1: vpand <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpand <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpand % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 32(<input_1=%rsi),>pp=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 188 ] # asm 1: vmovupd 188(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 188(<input_2=%rdx),>ee=%ymm2 vmovupd 188( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 64(<input_1=%rsi),>pp=%ymm1 vmovupd 64( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 220 ] # asm 1: vmovupd 220(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 220(<input_2=%rdx),>ee=%ymm2 vmovupd 220( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 96(<input_1=%rsi),>pp=%ymm1 vmovupd 96( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 252 ] # asm 1: vmovupd 252(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 252(<input_2=%rdx),>ee=%ymm2 vmovupd 252( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % 
ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 128(<input_1=%rsi),>pp=%ymm1 vmovupd 128( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 284 ] # asm 1: vmovupd 284(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 284(<input_2=%rdx),>ee=%ymm2 vmovupd 284( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 160(<input_1=%rsi),>pp=%ymm1 vmovupd 160( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 316 ] # asm 1: vmovupd 316(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 316(<input_2=%rdx),>ee=%ymm2 vmovupd 316( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 192(<input_1=%rsi),>pp=%ymm1 vmovupd 192( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 348 ] # asm 1: vmovupd 348(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 348(<input_2=%rdx),>ee=%ymm2 vmovupd 348( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 224(<input_1=%rsi),>pp=%ymm1 vmovupd 224( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 380 ] # asm 1: vmovupd 380(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 380(<input_2=%rdx),>ee=%ymm2 vmovupd 380( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 256(<input_1=%rsi),>pp=%ymm1 vmovupd 256( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 412 ] # asm 1: vmovupd 412(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 412(<input_2=%rdx),>ee=%ymm2 vmovupd 412( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 288(<input_1=%rsi),>pp=%ymm1 vmovupd 288( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 444 ] # asm 1: vmovupd 444(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 
444(<input_2=%rdx),>ee=%ymm2 vmovupd 444( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 320(<input_1=%rsi),>pp=%ymm1 vmovupd 320( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 476 ] # asm 1: vmovupd 476(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 476(<input_2=%rdx),>ee=%ymm2 vmovupd 476( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 352(<input_1=%rsi),>pp=%ymm1 vmovupd 352( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 508 ] # asm 1: vmovupd 508(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 508(<input_2=%rdx),>ee=%ymm2 vmovupd 508( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>pp=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 540 ] # asm 1: vmovupd 540(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 540(<input_2=%rdx),>ee=%ymm2 vmovupd 540( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: buf = ss # asm 1: vmovapd <ss=reg256#1,>buf=stack256#1 # asm 2: vmovapd <ss=%ymm0,>buf=0(%rsp) vmovapd % ymm0, 0( % rsp) # qhasm: s = *(uint32 *)(input_1 + 416) # asm 1: movl 416(<input_1=int64#2),>s=int64#6d # asm 2: movl 416(<input_1=%rsi),>s=%r9d movl 416( % rsi), % r9d # qhasm: e = *(uint32 *)(input_2 + 572) # asm 1: movl 572(<input_2=int64#3),>e=int64#7d # asm 2: movl 572(<input_2=%rdx),>e=%eax movl 572( % rdx), % eax # qhasm: s &= e # asm 1: and <e=int64#7,<s=int64#6 # asm 2: and <e=%rax,<s=%r9 and % rax, % r9 # qhasm: c_all = count(s) # asm 1: popcnt <s=int64#6, >c_all=int64#6 # asm 2: popcnt <s=%r9, >c_all=%r9 popcnt % r9, % r9 # qhasm: b64 = mem64[ buf_ptr + 0 ] # asm 1: movq 0(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 0(<buf_ptr=%rcx),>b64=%rax movq 0( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 8 ] # asm 1: movq 8(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 8(<buf_ptr=%rcx),>b64=%rax movq 8( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor 
<c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 16 ] # asm 1: movq 16(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 16(<buf_ptr=%rcx),>b64=%rax movq 16( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 24 ] # asm 1: movq 24(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 24(<buf_ptr=%rcx),>b64=%rax movq 24( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: addr = row # asm 1: mov <row=int64#5,>addr=int64#7 # asm 2: mov <row=%r8,>addr=%rax mov % r8, % rax # qhasm: (uint64) addr >>= 3 # asm 1: shr $3,<addr=int64#7 # asm 2: shr $3,<addr=%rax shr $3, % rax # qhasm: addr += input_0 # asm 1: add <input_0=int64#1,<addr=int64#7 # asm 2: add <input_0=%rdi,<addr=%rax add % rdi, % rax # qhasm: synd = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#7),>synd=int64#8 # asm 2: movzbq 0(<addr=%rax),>synd=%r10 movzbq 0( % rax), % r10 # qhasm: synd <<= 1 # asm 1: shl $1,<synd=int64#8 # asm 2: shl $1,<synd=%r10 shl $1, % r10 # qhasm: (uint32) c_all &= 1 # asm 1: and $1,<c_all=int64#6d # asm 2: and $1,<c_all=%r9d and $1, % r9d # qhasm: synd |= c_all # asm 1: or <c_all=int64#6,<synd=int64#8 # asm 2: or <c_all=%r9,<synd=%r10 or % r9, % r10 # qhasm: *(uint8 *) (addr + 0) = synd # asm 1: movb <synd=int64#8b,0(<addr=int64#7) # asm 2: movb <synd=%r10b,0(<addr=%rax) movb % r10b, 0( % rax) # qhasm: input_1 -= 420 # asm 1: sub $420,<input_1=int64#2 # asm 2: sub $420,<input_1=%rsi sub $420, % rsi # qhasm: =? 
row-0 # asm 1: cmp $0,<row=int64#5 # asm 2: cmp $0,<row=%r8 cmp $0, % r8 # comment:fp stack unchanged by jump # qhasm: goto loop if != jne ._loop # qhasm: ss = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 0(<input_0=%rdi),>ss=%ymm0 vmovupd 0( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 0(<input_2=%rdx),>ee=%ymm1 vmovupd 0( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 0 ] = ss # asm 1: vmovupd <ss=reg256#1,0(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,0(<input_0=%rdi) vmovupd % ymm0, 0( % rdi) # qhasm: ss = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 32(<input_0=%rdi),>ss=%ymm0 vmovupd 32( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 32(<input_2=%rdx),>ee=%ymm1 vmovupd 32( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 32 ] = ss # asm 1: vmovupd <ss=reg256#1,32(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,32(<input_0=%rdi) vmovupd % ymm0, 32( % rdi) # qhasm: ss = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 64(<input_0=%rdi),>ss=%ymm0 vmovupd 64( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 64(<input_2=%rdx),>ee=%ymm1 vmovupd 64( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 64 ] = ss # asm 1: vmovupd <ss=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: ss = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 96(<input_0=%rdi),>ss=%ymm0 vmovupd 96( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 96 ] # asm 1: vmovupd 96(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 96(<input_2=%rdx),>ee=%ymm1 vmovupd 96( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 96 ] = ss # asm 1: vmovupd <ss=reg256#1,96(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,96(<input_0=%rdi) vmovupd % ymm0, 96( % rdi) # qhasm: s = mem64[ input_0 + 128 ] # asm 1: movq 128(<input_0=int64#1),>s=int64#2 # asm 2: movq 128(<input_0=%rdi),>s=%rsi movq 128( % rdi), % rsi # qhasm: e = mem64[ input_2 + 128 ] # asm 1: movq 128(<input_2=int64#3),>e=int64#4 # asm 2: movq 128(<input_2=%rdx),>e=%rcx movq 128( % rdx), % rcx # qhasm: s ^= e # asm 1: xor <e=int64#4,<s=int64#2 # asm 2: xor <e=%rcx,<s=%rsi xor % rcx, % rsi # qhasm: mem64[ input_0 + 128 ] = s # asm 1: movq <s=int64#2,128(<input_0=int64#1) # asm 2: movq <s=%rsi,128(<input_0=%rdi) movq % rsi, 128( % rdi) # qhasm: s = mem64[ input_0 + 136 ] # asm 1: movq 136(<input_0=int64#1),>s=int64#2 # asm 2: movq 136(<input_0=%rdi),>s=%rsi movq 136( % rdi), % rsi # qhasm: e = mem64[ input_2 + 136 ] # asm 1: movq 136(<input_2=int64#3),>e=int64#4 # asm 2: movq 136(<input_2=%rdx),>e=%rcx movq 136( % rdx), % rcx # qhasm: s ^= e # asm 1: xor <e=int64#4,<s=int64#2 # asm 2: xor <e=%rcx,<s=%rsi 
xor % rcx, % rsi # qhasm: mem64[ input_0 + 136 ] = s # asm 1: movq <s=int64#2,136(<input_0=int64#1) # asm 2: movq <s=%rsi,136(<input_0=%rdi) movq % rsi, 136( % rdi) # qhasm: s = mem64[ input_0 + 144 ] # asm 1: movq 144(<input_0=int64#1),>s=int64#2 # asm 2: movq 144(<input_0=%rdi),>s=%rsi movq 144( % rdi), % rsi # qhasm: e = mem64[ input_2 + 144 ] # asm 1: movq 144(<input_2=int64#3),>e=int64#4 # asm 2: movq 144(<input_2=%rdx),>e=%rcx movq 144( % rdx), % rcx # qhasm: s ^= e # asm 1: xor <e=int64#4,<s=int64#2 # asm 2: xor <e=%rcx,<s=%rsi xor % rcx, % rsi # qhasm: mem64[ input_0 + 144 ] = s # asm 1: movq <s=int64#2,144(<input_0=int64#1) # asm 2: movq <s=%rsi,144(<input_0=%rdi) movq % rsi, 144( % rdi) # qhasm: s = *(uint32 *)( input_0 + 152 ) # asm 1: movl 152(<input_0=int64#1),>s=int64#2d # asm 2: movl 152(<input_0=%rdi),>s=%esi movl 152( % rdi), % esi # qhasm: e = *(uint32 *)( input_2 + 152 ) # asm 1: movl 152(<input_2=int64#3),>e=int64#3d # asm 2: movl 152(<input_2=%rdx),>e=%edx movl 152( % rdx), % edx # qhasm: s ^= e # asm 1: xor <e=int64#3,<s=int64#2 # asm 2: xor <e=%rdx,<s=%rsi xor % rdx, % rsi # qhasm: *(uint32 *)( input_0 + 152 ) = s # asm 1: movl <s=int64#2d,152(<input_0=int64#1) # asm 2: movl <s=%esi,152(<input_0=%rdi) movl % esi, 152( % rdi) # qhasm: return add % r11, % rsp ret
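syndrome_asm above computes a McEliece syndrome s = H·e over GF(2), consistent with a systematic parity-check matrix H = (I | T): the loop runs row from 1247 down to 0, ANDs one packed matrix row with the tail of the error vector (input_2 + 156 onward), reduces the result to a single parity bit via popcnt, and shifts that bit into byte row/8 of the output; after the loop, the leading bytes of e are XORed into s, which is the identity block's contribution. A byte-wise C sketch of the row loop follows; NROWS matches the 1248 above, while ROW_BYTES and the row layout are illustrative assumptions rather than this file's exact geometry.

#include <stdint.h>

#define NROWS 1248     /* matches the row counter above */
#define ROW_BYTES 448  /* illustrative row width, not read from this file */

/* Each matrix row is ANDed with the error vector; the parity of the
   result becomes one syndrome bit, shifted into byte row/8 as above. */
static void syndrome_rows(uint8_t *s, const uint8_t *rows, const uint8_t *e) {
    for (int i = NROWS - 1; i >= 0; i--) {
        uint8_t parity = 0;
        for (int j = 0; j < ROW_BYTES; j++) {
            uint8_t t = rows[(size_t)i * ROW_BYTES + j] & e[j];
            t ^= t >> 4;  /* fold the byte down to its parity in bit 0 */
            t ^= t >> 2;
            t ^= t >> 1;
            parity ^= t & 1;
        }
        s[i / 8] = (uint8_t)((s[i / 8] << 1) | parity);
    }
    /* The real routine then XORs the head of e into s for the identity
       block of H, as in the vpxor/movq epilogue above. */
}

Using AND plus a parity reduction for each row keeps the inner product over GF(2) branch-free, so the syndrome computation leaks nothing about e through timing.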
mktmansour/MKT-KSA-Geolocation-Security
254,430
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896/avx2/transpose_64x128_sp_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x128_sp_asm CRYPTO_NAMESPACE(transpose_64x128_sp_asm) #define _transpose_64x128_sp_asm _CRYPTO_NAMESPACE(transpose_64x128_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 x0 # qhasm: reg128 x1 # qhasm: reg128 x2 # qhasm: reg128 x3 # qhasm: reg128 x4 # qhasm: reg128 x5 # qhasm: reg128 x6 # qhasm: reg128 x7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x128_sp_asm .p2align 5 .global _transpose_64x128_sp_asm .global transpose_64x128_sp_asm _transpose_64x128_sp_asm: transpose_64x128_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: x0 = mem128[ input_0 + 0 ] # asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6 movdqu 0( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 128 ] # asm 1: movdqu 128(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 128(<input_0=%rdi),>x1=%xmm7 movdqu 128( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 256 ] # asm 1: movdqu 
256(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 256(<input_0=%rdi),>x2=%xmm8 movdqu 256( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 384 ] # asm 1: movdqu 384(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 384(<input_0=%rdi),>x3=%xmm9 movdqu 384( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 512 ] # asm 1: movdqu 512(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 512(<input_0=%rdi),>x4=%xmm10 movdqu 512( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 640(<input_0=%rdi),>x5=%xmm11 movdqu 640( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 768(<input_0=%rdi),>x6=%xmm12 movdqu 768( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 896(<input_0=%rdi),>x7=%xmm13 movdqu 896( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % 
xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw 
$8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 0 ] = x0 # asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi) movdqu % xmm9, 0( % rdi) # qhasm: mem128[ input_0 + 128 ] = x1 # asm 1: movdqu <x1=reg128#14,128(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,128(<input_0=%rdi) movdqu % xmm13, 128( % rdi) # qhasm: mem128[ input_0 + 256 ] = x2 # asm 1: movdqu <x2=reg128#15,256(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,256(<input_0=%rdi) movdqu % xmm14, 256( % rdi) # qhasm: mem128[ input_0 + 384 ] = x3 # asm 1: movdqu <x3=reg128#11,384(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,384(<input_0=%rdi) movdqu % xmm10, 384( % rdi) # qhasm: mem128[ input_0 + 512 ] = x4 # asm 1: movdqu <x4=reg128#12,512(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,512(<input_0=%rdi) movdqu % xmm11, 512( % rdi) # qhasm: mem128[ input_0 + 640 ] = x5 # asm 1: movdqu <x5=reg128#9,640(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,640(<input_0=%rdi) movdqu % xmm8, 640( % rdi) # qhasm: mem128[ input_0 + 768 ] = x6 # asm 1: movdqu <x6=reg128#13,768(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,768(<input_0=%rdi) movdqu % xmm12, 768( % rdi) # qhasm: mem128[ input_0 + 896 ] = x7 # asm 1: movdqu <x7=reg128#7,896(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,896(<input_0=%rdi) movdqu % xmm6, 896( % rdi) # qhasm: x0 = mem128[ input_0 + 16 ] # asm 1: movdqu 16(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 16(<input_0=%rdi),>x0=%xmm6 movdqu 16( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 144 ] # asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7 movdqu 144( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 272 ] # asm 1: movdqu 272(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 272(<input_0=%rdi),>x2=%xmm8 movdqu 272( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 400 ] # asm 1: movdqu 400(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 
400(<input_0=%rdi),>x3=%xmm9 movdqu 400( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 528 ] # asm 1: movdqu 528(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 528(<input_0=%rdi),>x4=%xmm10 movdqu 528( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 656 ] # asm 1: movdqu 656(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 656(<input_0=%rdi),>x5=%xmm11 movdqu 656( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 784 ] # asm 1: movdqu 784(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 784(<input_0=%rdi),>x6=%xmm12 movdqu 784( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 912 ] # asm 1: movdqu 912(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 912(<input_0=%rdi),>x7=%xmm13 movdqu 912( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % 
xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw 
$8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 16 ] = x0 # asm 1: movdqu <x0=reg128#10,16(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,16(<input_0=%rdi) movdqu % xmm9, 16( % rdi) # qhasm: mem128[ input_0 + 144 ] = x1 # asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi) movdqu % xmm13, 144( % rdi) # qhasm: mem128[ input_0 + 272 ] = x2 # asm 1: movdqu <x2=reg128#15,272(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,272(<input_0=%rdi) movdqu % xmm14, 272( % rdi) # qhasm: mem128[ input_0 + 400 ] = x3 # asm 1: movdqu <x3=reg128#11,400(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,400(<input_0=%rdi) movdqu % xmm10, 400( % rdi) # qhasm: mem128[ input_0 + 528 ] = x4 # asm 1: movdqu <x4=reg128#12,528(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,528(<input_0=%rdi) movdqu % xmm11, 528( % rdi) # qhasm: mem128[ input_0 + 656 ] = x5 # asm 1: movdqu <x5=reg128#9,656(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,656(<input_0=%rdi) movdqu % xmm8, 656( % rdi) # qhasm: mem128[ input_0 + 784 ] = x6 # asm 1: movdqu <x6=reg128#13,784(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,784(<input_0=%rdi) movdqu % xmm12, 784( % rdi) # qhasm: mem128[ input_0 + 912 ] = x7 # asm 1: movdqu <x7=reg128#7,912(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,912(<input_0=%rdi) movdqu % xmm6, 912( % rdi) # qhasm: x0 = mem128[ input_0 + 32 ] # asm 1: movdqu 32(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 32(<input_0=%rdi),>x0=%xmm6 movdqu 32( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 160 ] # asm 1: movdqu 160(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 160(<input_0=%rdi),>x1=%xmm7 movdqu 160( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 288 ] # asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8 movdqu 288( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 416 ] # asm 1: movdqu 416(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 416(<input_0=%rdi),>x3=%xmm9 movdqu 416( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 544 ] # asm 1: movdqu 544(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 544(<input_0=%rdi),>x4=%xmm10 movdqu 544( % rdi), % xmm10 # qhasm: x5 = 
mem128[ input_0 + 672 ] # asm 1: movdqu 672(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 672(<input_0=%rdi),>x5=%xmm11 movdqu 672( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 800 ] # asm 1: movdqu 800(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 800(<input_0=%rdi),>x6=%xmm12 movdqu 800( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 928 ] # asm 1: movdqu 928(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 928(<input_0=%rdi),>x7=%xmm13 movdqu 928( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq 
$32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor 
<v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: 
vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 32 ] = x0 # asm 1: movdqu <x0=reg128#10,32(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,32(<input_0=%rdi) movdqu % xmm9, 32( % rdi) # qhasm: mem128[ input_0 + 160 ] = x1 # asm 1: movdqu <x1=reg128#14,160(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,160(<input_0=%rdi) movdqu % xmm13, 160( % rdi) # qhasm: mem128[ input_0 + 288 ] = x2 # asm 1: movdqu <x2=reg128#15,288(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,288(<input_0=%rdi) movdqu % xmm14, 288( % rdi) # qhasm: mem128[ input_0 + 416 ] = x3 # asm 1: movdqu <x3=reg128#11,416(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,416(<input_0=%rdi) movdqu % xmm10, 416( % rdi) # qhasm: mem128[ input_0 + 544 ] = x4 # asm 1: movdqu <x4=reg128#12,544(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,544(<input_0=%rdi) movdqu % xmm11, 544( % rdi) # qhasm: mem128[ input_0 + 672 ] = x5 # asm 1: movdqu <x5=reg128#9,672(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,672(<input_0=%rdi) movdqu % xmm8, 672( % rdi) # qhasm: mem128[ input_0 + 800 ] = x6 # asm 1: movdqu <x6=reg128#13,800(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,800(<input_0=%rdi) movdqu % xmm12, 800( % rdi) # qhasm: mem128[ input_0 + 928 ] = x7 # asm 1: movdqu <x7=reg128#7,928(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,928(<input_0=%rdi) movdqu % xmm6, 928( % rdi) # qhasm: x0 = mem128[ input_0 + 48 ] # asm 1: movdqu 48(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 48(<input_0=%rdi),>x0=%xmm6 movdqu 48( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 176 ] # asm 1: movdqu 176(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 176(<input_0=%rdi),>x1=%xmm7 movdqu 176( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 304 ] # asm 1: movdqu 304(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 304(<input_0=%rdi),>x2=%xmm8 movdqu 304( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 432 ] # asm 1: movdqu 432(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 432(<input_0=%rdi),>x3=%xmm9 movdqu 432( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 560 ] # asm 1: movdqu 560(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 560(<input_0=%rdi),>x4=%xmm10 movdqu 560( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 688 ] # asm 1: movdqu 688(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 688(<input_0=%rdi),>x5=%xmm11 movdqu 688( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 816 ] # asm 1: movdqu 
816(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 816(<input_0=%rdi),>x6=%xmm12 movdqu 816( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 944 ] # asm 1: movdqu 944(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 944(<input_0=%rdi),>x7=%xmm13 movdqu 944( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq 
$32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % 
xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 48 ] = x0 # asm 1: movdqu <x0=reg128#10,48(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,48(<input_0=%rdi) movdqu % xmm9, 48( % rdi) # qhasm: mem128[ input_0 + 176 ] = x1 # asm 1: movdqu <x1=reg128#14,176(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,176(<input_0=%rdi) movdqu % xmm13, 176( % rdi) # qhasm: mem128[ input_0 + 304 ] = x2 # asm 1: movdqu <x2=reg128#15,304(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,304(<input_0=%rdi) movdqu % xmm14, 304( % rdi) # qhasm: mem128[ input_0 + 432 ] = x3 # asm 1: movdqu <x3=reg128#11,432(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,432(<input_0=%rdi) movdqu % xmm10, 432( % rdi) # qhasm: mem128[ input_0 + 560 ] = x4 # asm 1: movdqu <x4=reg128#12,560(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,560(<input_0=%rdi) movdqu % xmm11, 560( % rdi) # qhasm: mem128[ input_0 + 688 ] = x5 # asm 1: movdqu <x5=reg128#9,688(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,688(<input_0=%rdi) movdqu % xmm8, 688( % rdi) # qhasm: mem128[ input_0 + 816 ] = x6 # asm 1: movdqu <x6=reg128#13,816(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,816(<input_0=%rdi) movdqu % xmm12, 816( % rdi) # qhasm: mem128[ input_0 + 944 ] = x7 # asm 1: movdqu <x7=reg128#7,944(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,944(<input_0=%rdi) movdqu % xmm6, 944( % rdi) # qhasm: x0 = mem128[ input_0 + 64 ] # asm 1: movdqu 64(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 64(<input_0=%rdi),>x0=%xmm6 movdqu 64( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 192 ] # asm 1: movdqu 192(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 192(<input_0=%rdi),>x1=%xmm7 movdqu 192( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 320 ] # asm 1: movdqu 320(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 320(<input_0=%rdi),>x2=%xmm8 movdqu 320( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 448 ] # asm 1: movdqu 448(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 448(<input_0=%rdi),>x3=%xmm9 movdqu 448( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 576 ] # asm 1: movdqu 576(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 576(<input_0=%rdi),>x4=%xmm10 movdqu 576( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 704 ] # asm 1: movdqu 704(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 704(<input_0=%rdi),>x5=%xmm11 movdqu 704( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 832 ] # asm 1: movdqu 832(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 832(<input_0=%rdi),>x6=%xmm12 movdqu 832( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 960 ] # asm 1: movdqu 960(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 
960(<input_0=%rdi),>x7=%xmm13
movdqu 960( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 64 ] = x0
# asm 1: movdqu <x0=reg128#10,64(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,64(<input_0=%rdi)
movdqu % xmm9, 64( % rdi)

# qhasm: mem128[ input_0 + 192 ] = x1
# asm 1: movdqu <x1=reg128#14,192(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,192(<input_0=%rdi)
movdqu % xmm13, 192( % rdi)

# qhasm: mem128[ input_0 + 320 ] = x2
# asm 1: movdqu <x2=reg128#15,320(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,320(<input_0=%rdi)
movdqu % xmm14, 320( % rdi)

# qhasm: mem128[ input_0 + 448 ] = x3
# asm 1: movdqu <x3=reg128#11,448(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,448(<input_0=%rdi)
movdqu % xmm10, 448( % rdi)

# qhasm: mem128[ input_0 + 576 ] = x4
# asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi)
movdqu % xmm11, 576( % rdi)

# qhasm: mem128[ input_0 + 704 ] = x5
# asm 1: movdqu <x5=reg128#9,704(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,704(<input_0=%rdi)
movdqu % xmm8, 704( % rdi)

# qhasm: mem128[ input_0 + 832 ] = x6
# asm 1: movdqu <x6=reg128#13,832(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,832(<input_0=%rdi)
movdqu % xmm12, 832( % rdi)

# qhasm: mem128[ input_0 + 960 ] = x7
# asm 1: movdqu <x7=reg128#7,960(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,960(<input_0=%rdi)
movdqu % xmm6, 960( % rdi)
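# editorial note (added by hand, not qhasm output): the pass above swapped
# 32-, 16- and 8-bit halves between row pairs of one 16-byte column
# (offsets 64, 192, ..., 960) via the mask/shift/vpor sequences; the passes
# below appear to repeat the same interleave for the remaining columns
# (80, 208, ..., 976 and so on).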
# qhasm: x0 = mem128[ input_0 + 80 ]
# asm 1: movdqu 80(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 80(<input_0=%rdi),>x0=%xmm6
movdqu 80( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 208 ]
# asm 1: movdqu 208(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 208(<input_0=%rdi),>x1=%xmm7
movdqu 208( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 336 ]
# asm 1: movdqu 336(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 336(<input_0=%rdi),>x2=%xmm8
movdqu 336( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 464 ]
# asm 1: movdqu 464(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 464(<input_0=%rdi),>x3=%xmm9
movdqu 464( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 592 ]
# asm 1: movdqu 592(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 592(<input_0=%rdi),>x4=%xmm10
movdqu 592( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 720 ]
# asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11
movdqu 720( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 848 ]
# asm 1: movdqu 848(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 848(<input_0=%rdi),>x6=%xmm12
movdqu 848( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 976 ]
# asm 1: movdqu 976(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 976(<input_0=%rdi),>x7=%xmm13
movdqu 976( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 80 ] = x0
# asm 1: movdqu <x0=reg128#10,80(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,80(<input_0=%rdi)
movdqu % xmm9, 80( % rdi)

# qhasm: mem128[ input_0 + 208 ] = x1
# asm 1: movdqu <x1=reg128#14,208(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,208(<input_0=%rdi)
movdqu % xmm13, 208( % rdi)

# qhasm: mem128[ input_0 + 336 ] = x2
# asm 1: movdqu <x2=reg128#15,336(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,336(<input_0=%rdi)
movdqu % xmm14, 336( % rdi)

# qhasm: mem128[ input_0 + 464 ] = x3
# asm 1: movdqu <x3=reg128#11,464(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,464(<input_0=%rdi)
movdqu % xmm10, 464( % rdi)

# qhasm: mem128[ input_0 + 592 ] = x4
# asm 1: movdqu <x4=reg128#12,592(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,592(<input_0=%rdi)
movdqu % xmm11, 592( % rdi)

# qhasm: mem128[ input_0 + 720 ] = x5
# asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi)
movdqu % xmm8, 720( % rdi)

# qhasm: mem128[ input_0 + 848 ] = x6
# asm 1: movdqu <x6=reg128#13,848(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,848(<input_0=%rdi)
movdqu % xmm12, 848( % rdi)

# qhasm: mem128[ input_0 + 976 ] = x7
# asm 1: movdqu <x7=reg128#7,976(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,976(<input_0=%rdi)
movdqu % xmm6, 976( % rdi)

# qhasm: x0 = mem128[ input_0 + 96 ]
# asm 1: movdqu 96(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 96(<input_0=%rdi),>x0=%xmm6
movdqu 96( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 224 ]
# asm 1: movdqu 224(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 224(<input_0=%rdi),>x1=%xmm7
movdqu 224( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 352 ]
# asm 1: movdqu 352(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 352(<input_0=%rdi),>x2=%xmm8
movdqu 352( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 480 ]
# asm 1: movdqu 480(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 480(<input_0=%rdi),>x3=%xmm9
movdqu 480( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 608 ]
# asm 1: movdqu 608(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 608(<input_0=%rdi),>x4=%xmm10
movdqu 608( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 736 ]
# asm 1: movdqu 736(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 736(<input_0=%rdi),>x5=%xmm11
movdqu 736( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 864 ]
# asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12
movdqu 864( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 992 ]
# asm 1: movdqu 992(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 992(<input_0=%rdi),>x7=%xmm13
movdqu 992( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15
# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 96 ] = x0
# asm 1: movdqu <x0=reg128#10,96(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,96(<input_0=%rdi)
movdqu % xmm9, 96( % rdi)

# qhasm: mem128[ input_0 + 224 ] = x1
# asm 1: movdqu <x1=reg128#14,224(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,224(<input_0=%rdi)
movdqu % xmm13, 224( % rdi)

# qhasm: mem128[ input_0 + 352 ] = x2
# asm 1: movdqu <x2=reg128#15,352(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,352(<input_0=%rdi)
movdqu % xmm14, 352( % rdi)

# qhasm: mem128[ input_0 + 480 ] = x3
# asm 1: movdqu <x3=reg128#11,480(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,480(<input_0=%rdi)
movdqu % xmm10, 480( % rdi)

# qhasm: mem128[ input_0 + 608 ] = x4
# asm 1: movdqu <x4=reg128#12,608(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,608(<input_0=%rdi)
movdqu % xmm11, 608( % rdi)

# qhasm: mem128[ input_0 + 736 ] = x5
# asm 1: movdqu <x5=reg128#9,736(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,736(<input_0=%rdi)
movdqu % xmm8, 736( % rdi)

# qhasm: mem128[ input_0 + 864 ] = x6
# asm 1: movdqu <x6=reg128#13,864(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi)
movdqu % xmm12, 864( % rdi)

# qhasm: mem128[ input_0 + 992 ] = x7
# asm 1: movdqu <x7=reg128#7,992(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,992(<input_0=%rdi)
movdqu % xmm6, 992( % rdi)

# qhasm: x0 = mem128[ input_0 + 112 ]
# asm 1: movdqu 112(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 112(<input_0=%rdi),>x0=%xmm6
movdqu 112( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 240 ]
# asm 1: movdqu 240(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 240(<input_0=%rdi),>x1=%xmm7
movdqu 240( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 368 ]
# asm 1: movdqu 368(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 368(<input_0=%rdi),>x2=%xmm8
movdqu 368( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 496 ]
# asm 1: movdqu 496(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 496(<input_0=%rdi),>x3=%xmm9
movdqu 496( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 624 ]
# asm 1: movdqu 624(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 624(<input_0=%rdi),>x4=%xmm10
movdqu 624( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 752 ]
# asm 1: movdqu 752(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 752(<input_0=%rdi),>x5=%xmm11
movdqu 752( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 880 ]
# asm 1: movdqu 880(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 880(<input_0=%rdi),>x6=%xmm12
movdqu 880( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 1008 ]
# asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13
movdqu 1008( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#1
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm0
vpand % xmm0, % xmm9, % xmm0

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#13
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm12
vpsllq $32, % xmm13, % xmm12

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1
vpand % xmm1, % xmm13, % xmm1

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#1,>x3=reg128#1
# asm 2: vpor <v10=%xmm12,<v00=%xmm0,>x3=%xmm0
vpor % xmm12, % xmm0, % xmm0

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1
vpor % xmm1, % xmm9, % xmm1

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9
vpand % xmm2, % xmm14, % xmm9

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#13
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm12
vpslld $16, % xmm11, % xmm12

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#14
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm13
vpsrld $16, % xmm14, % xmm13

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9
vpor % xmm12, % xmm9, % xmm9

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11
vpor % xmm11, % xmm13, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12
vpand % xmm2, % xmm10, % xmm12

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#1,>v10=reg128#14
# asm 2: vpslld $16,<x3=%xmm0,>v10=%xmm13
vpslld $16, % xmm0, % xmm13

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0
vpand % xmm3, % xmm0, % xmm0

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13
# asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12
vpor % xmm13, % xmm12, % xmm12

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0
vpor % xmm0, % xmm10, % xmm0

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10
vpand % xmm2, % xmm6, % xmm10

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#14
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm13
vpslld $16, % xmm8, % xmm13

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11
# asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10
vpor % xmm13, % xmm10, % xmm10

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#3
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm2
vpand % xmm2, % xmm7, % xmm2

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#2,>v10=reg128#9
# asm 2: vpslld $16,<x7=%xmm1,>v10=%xmm8
vpslld $16, % xmm1, % xmm8

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1
vpand % xmm3, % xmm1, % xmm1

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#9,<v00=reg128#3,>x5=reg128#3
# asm 2: vpor <v10=%xmm8,<v00=%xmm2,>x5=%xmm2
vpor % xmm8, % xmm2, % xmm2

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1
vpor % xmm1, % xmm7, % xmm1

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4
# asm 2: vpand <mask4=%xmm4,<x0=%xmm9,>v00=%xmm3
vpand % xmm4, % xmm9, % xmm3

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#13,>v10=reg128#8
# asm 2: vpsllw $8,<x1=%xmm12,>v10=%xmm7
vpsllw $8, % xmm12, % xmm7

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#10,>v01=reg128#9
# asm 2: vpsrlw $8,<x0=%xmm9,>v01=%xmm8
vpsrlw $8, % xmm9, % xmm8

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10
# asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9
vpand % xmm5, % xmm12, % xmm9

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4
# asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3
vpor % xmm7, % xmm3, % xmm3

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7
vpor % xmm9, % xmm8, % xmm7

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8
vpand % xmm4, % xmm11, % xmm8

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#1,>v10=reg128#10
# asm 2: vpsllw $8,<x3=%xmm0,>v10=%xmm9
vpsllw $8, % xmm0, % xmm9

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0
vpand % xmm5, % xmm0, % xmm0

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9
# asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8
vpor % xmm9, % xmm8, % xmm8

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0
vpor % xmm0, % xmm11, % xmm0

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9
vpand % xmm4, % xmm10, % xmm9

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#3,>v10=reg128#12
# asm 2: vpsllw $8,<x5=%xmm2,>v10=%xmm11
vpsllw $8, % xmm2, % xmm11

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#11,>v01=reg128#11
# asm 2: vpsrlw $8,<x4=%xmm10,>v01=%xmm10
vpsrlw $8, % xmm10, % xmm10

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3
# asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2
vpand % xmm5, % xmm2, % xmm2

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10
# asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9
vpor % xmm11, % xmm9, % xmm9

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3
# asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2
vpor % xmm2, % xmm10, % xmm2

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#5
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm4
vpand % xmm4, % xmm6, % xmm4

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#2,>v10=reg128#11
# asm 2: vpsllw $8,<x7=%xmm1,>v10=%xmm10
vpsllw $8, % xmm1, % xmm10

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1
vpand % xmm5, % xmm1, % xmm1

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#11,<v00=reg128#5,>x6=reg128#5
# asm 2: vpor <v10=%xmm10,<v00=%xmm4,>x6=%xmm4
vpor % xmm10, % xmm4, % xmm4

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm6,>x7=%xmm1
vpor % xmm1, % xmm6, % xmm1

# qhasm: mem128[ input_0 + 112 ] = x0
# asm 1: movdqu <x0=reg128#4,112(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm3,112(<input_0=%rdi)
movdqu % xmm3, 112( % rdi)

# qhasm: mem128[ input_0 + 240 ] = x1
# asm 1: movdqu <x1=reg128#8,240(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm7,240(<input_0=%rdi)
movdqu % xmm7, 240( % rdi)

# qhasm: mem128[ input_0 + 368 ] = x2
# asm 1: movdqu <x2=reg128#9,368(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm8,368(<input_0=%rdi)
movdqu % xmm8, 368( % rdi)

# qhasm: mem128[ input_0 + 496 ] = x3
# asm 1: movdqu <x3=reg128#1,496(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm0,496(<input_0=%rdi)
movdqu % xmm0, 496( % rdi)

# qhasm: mem128[ input_0 + 624 ] = x4
# asm 1: movdqu <x4=reg128#10,624(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm9,624(<input_0=%rdi)
movdqu % xmm9, 624( % rdi)

# qhasm: mem128[ input_0 + 752 ] = x5
# asm 1: movdqu <x5=reg128#3,752(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm2,752(<input_0=%rdi)
movdqu % xmm2, 752( % rdi)

# qhasm: mem128[ input_0 + 880 ] = x6
# asm 1: movdqu <x6=reg128#5,880(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm4,880(<input_0=%rdi)
movdqu % xmm4, 880( % rdi)

# qhasm: mem128[ input_0 + 1008 ] = x7
# asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi)
movdqu % xmm1, 1008( % rdi)
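# editorial note (added by hand, not qhasm output): the masks are reloaded
# below from MASK2_*, MASK1_* and MASK0_* (the 0x0F.., 0x33.. and 0x55..
# constants defined in consts.S), so the following psllq $4 / $2 / $1
# passes appear to continue the same interleave at nibble, bit-pair and
# single-bit granularity within each byte.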
112(<input_0=%rdi),>x7=%xmm13 movdqu 112( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % 
xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = x0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = x2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = x4 & mask4
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = x6 & mask4
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6
# qhasm: mem128[ input_0 + 0 ] = x0
movdqu %xmm9, 0(%rdi)
# qhasm: mem128[ input_0 + 16 ] = x1
movdqu %xmm13, 16(%rdi)
# qhasm: mem128[ input_0 + 32 ] = x2
movdqu %xmm14, 32(%rdi)
# qhasm: mem128[ input_0 + 48 ] = x3
movdqu %xmm10, 48(%rdi)
# qhasm: mem128[ input_0 + 64 ] = x4
movdqu %xmm11, 64(%rdi)
# qhasm: mem128[ input_0 + 80 ] = x5
movdqu %xmm8, 80(%rdi)
# qhasm: mem128[ input_0 + 96 ] = x6
movdqu %xmm12, 96(%rdi)
# qhasm: mem128[ input_0 + 112 ] = x7
movdqu %xmm6, 112(%rdi)
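# End of the first 128-byte block. Each block in this routine follows
# the same pattern: load eight 128-bit words, then run three rounds of
# masked swaps that pair words at stride 4 (mask0/mask1, 4-bit shifts),
# stride 2 (mask2/mask3, 2-bit shifts) and stride 1 (mask4/mask5, 1-bit
# shifts); every swap is built from vpand, psllq/psrlq and vpor only,
# so the routine is branch-free with a fixed memory-access pattern and
# therefore constant-time. The pattern appears to implement successive
# layers of a bitsliced (bit-matrix) transpose.

# Second 128-byte block (offsets 128..240).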
# qhasm: x0 = mem128[ input_0 + 128 ]
movdqu 128(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 144 ]
movdqu 144(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 160 ]
movdqu 160(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 176 ]
movdqu 176(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 192 ]
movdqu 192(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 208 ]
movdqu 208(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 224 ]
movdqu 224(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 240 ]
movdqu 240(%rdi), %xmm13
# qhasm: v00 = x0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = x1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = x2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = x3 & mask0
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = x0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = x1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = x0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = x2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = x4 & mask4
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = x6 & mask4
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6
# qhasm: mem128[ input_0 + 128 ] = x0
movdqu %xmm9, 128(%rdi)
# qhasm: mem128[ input_0 + 144 ] = x1
movdqu %xmm13, 144(%rdi)
# qhasm: mem128[ input_0 + 160 ] = x2
movdqu %xmm14, 160(%rdi)
# qhasm: mem128[ input_0 + 176 ] = x3
movdqu %xmm10, 176(%rdi)
# qhasm: mem128[ input_0 + 192 ] = x4
movdqu %xmm11, 192(%rdi)
# qhasm: mem128[ input_0 + 208 ] = x5
movdqu %xmm8, 208(%rdi)
# qhasm: mem128[ input_0 + 224 ] = x6
movdqu %xmm12, 224(%rdi)
# qhasm: mem128[ input_0 + 240 ] = x7
movdqu %xmm6, 240(%rdi)
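# Third 128-byte block (offsets 256..368): same three swap rounds.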
# qhasm: x0 = mem128[ input_0 + 256 ]
movdqu 256(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 272 ]
movdqu 272(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 288 ]
movdqu 288(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 304 ]
movdqu 304(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 320 ]
movdqu 320(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 336 ]
movdqu 336(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 352 ]
movdqu 352(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 368 ]
movdqu 368(%rdi), %xmm13
# qhasm: v00 = x0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = x1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = x2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = x3 & mask0
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = x0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = x1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = x0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = x2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = x4 & mask4
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = x6 & mask4
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6
# qhasm: mem128[ input_0 + 256 ] = x0
movdqu %xmm9, 256(%rdi)
# qhasm: mem128[ input_0 + 272 ] = x1
movdqu %xmm13, 272(%rdi)
# qhasm: mem128[ input_0 + 288 ] = x2
movdqu %xmm14, 288(%rdi)
# qhasm: mem128[ input_0 + 304 ] = x3
movdqu %xmm10, 304(%rdi)
# qhasm: mem128[ input_0 + 320 ] = x4
movdqu %xmm11, 320(%rdi)
# qhasm: mem128[ input_0 + 336 ] = x5
movdqu %xmm8, 336(%rdi)
# qhasm: mem128[ input_0 + 352 ] = x6
movdqu %xmm12, 352(%rdi)
# qhasm: mem128[ input_0 + 368 ] = x7
movdqu %xmm6, 368(%rdi)
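# Fourth 128-byte block (offsets 384..496): same three swap rounds.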
# qhasm: x0 = mem128[ input_0 + 384 ]
movdqu 384(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 400 ]
movdqu 400(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 416 ]
movdqu 416(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 432 ]
movdqu 432(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 448 ]
movdqu 448(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 464 ]
movdqu 464(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 480 ]
movdqu 480(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 496 ]
movdqu 496(%rdi), %xmm13
# qhasm: v00 = x0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = x1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = x2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = x3 & mask0
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = x0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = x1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = x0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = x2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = x4 & mask4
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = x6 & mask4
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6
# qhasm: mem128[ input_0 + 384 ] = x0
movdqu %xmm9, 384(%rdi)
# qhasm: mem128[ input_0 + 400 ] = x1
movdqu %xmm13, 400(%rdi)
# qhasm: mem128[ input_0 + 416 ] = x2
movdqu %xmm14, 416(%rdi)
# qhasm: mem128[ input_0 + 432 ] = x3
movdqu %xmm10, 432(%rdi)
# qhasm: mem128[ input_0 + 448 ] = x4
movdqu %xmm11, 448(%rdi)
# qhasm: mem128[ input_0 + 464 ] = x5
movdqu %xmm8, 464(%rdi)
# qhasm: mem128[ input_0 + 480 ] = x6
movdqu %xmm12, 480(%rdi)
# qhasm: mem128[ input_0 + 496 ] = x7
movdqu %xmm6, 496(%rdi)
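# Fifth 128-byte block (offsets 512..624): same three swap rounds.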
# qhasm: x0 = mem128[ input_0 + 512 ]
movdqu 512(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 528 ]
movdqu 528(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 544 ]
movdqu 544(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 560 ]
movdqu 560(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 576 ]
movdqu 576(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 592 ]
movdqu 592(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 608 ]
movdqu 608(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 624 ]
movdqu 624(%rdi), %xmm13
# qhasm: v00 = x0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = x1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = x2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = x3 & mask0
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = x0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = x1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = x0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5 # asm
1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % 
xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 512 ] = x0 # asm 1: movdqu <x0=reg128#10,512(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,512(<input_0=%rdi) movdqu % xmm9, 512( % rdi) # qhasm: mem128[ input_0 + 528 ] = x1 # asm 1: movdqu <x1=reg128#14,528(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,528(<input_0=%rdi) movdqu % xmm13, 528( % rdi) # qhasm: mem128[ input_0 + 544 ] = x2 # asm 1: movdqu <x2=reg128#15,544(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,544(<input_0=%rdi) movdqu % xmm14, 544( % rdi) # qhasm: mem128[ input_0 + 560 ] = x3 # asm 1: movdqu <x3=reg128#11,560(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,560(<input_0=%rdi) movdqu % xmm10, 560( % rdi) # qhasm: mem128[ input_0 + 576 ] = x4 # asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi) movdqu % xmm11, 576( % rdi) # qhasm: mem128[ input_0 + 592 ] = x5 # asm 1: movdqu <x5=reg128#9,592(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,592(<input_0=%rdi) movdqu % xmm8, 592( % rdi) # qhasm: mem128[ input_0 + 608 ] = x6 # asm 1: movdqu <x6=reg128#13,608(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,608(<input_0=%rdi) movdqu % xmm12, 608( % rdi) # qhasm: mem128[ input_0 + 624 ] = x7 # asm 1: movdqu <x7=reg128#7,624(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,624(<input_0=%rdi) movdqu % xmm6, 624( % rdi) # qhasm: x0 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 640(<input_0=%rdi),>x0=%xmm6 movdqu 640( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 656 ] # asm 1: movdqu 656(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 656(<input_0=%rdi),>x1=%xmm7 movdqu 656( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 672 ] # asm 1: movdqu 672(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 672(<input_0=%rdi),>x2=%xmm8 movdqu 672( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 688 ] # asm 1: movdqu 688(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 688(<input_0=%rdi),>x3=%xmm9 movdqu 688( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 704 ] # asm 1: movdqu 704(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 704(<input_0=%rdi),>x4=%xmm10 movdqu 704( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 720 ] # asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11 movdqu 720( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 736 ] # asm 1: movdqu 736(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 736(<input_0=%rdi),>x6=%xmm12 movdqu 736( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 752 ] # asm 1: movdqu 752(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 752(<input_0=%rdi),>x7=%xmm13 movdqu 752( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 
<<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % 
xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand 
<mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 
2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 640 ] = x0 # asm 1: movdqu <x0=reg128#10,640(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,640(<input_0=%rdi) movdqu % xmm9, 640( % rdi) # qhasm: mem128[ input_0 + 656 ] = x1 # asm 1: movdqu 
<x1=reg128#14,656(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,656(<input_0=%rdi) movdqu % xmm13, 656( % rdi) # qhasm: mem128[ input_0 + 672 ] = x2 # asm 1: movdqu <x2=reg128#15,672(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,672(<input_0=%rdi) movdqu % xmm14, 672( % rdi) # qhasm: mem128[ input_0 + 688 ] = x3 # asm 1: movdqu <x3=reg128#11,688(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,688(<input_0=%rdi) movdqu % xmm10, 688( % rdi) # qhasm: mem128[ input_0 + 704 ] = x4 # asm 1: movdqu <x4=reg128#12,704(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,704(<input_0=%rdi) movdqu % xmm11, 704( % rdi) # qhasm: mem128[ input_0 + 720 ] = x5 # asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi) movdqu % xmm8, 720( % rdi) # qhasm: mem128[ input_0 + 736 ] = x6 # asm 1: movdqu <x6=reg128#13,736(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,736(<input_0=%rdi) movdqu % xmm12, 736( % rdi) # qhasm: mem128[ input_0 + 752 ] = x7 # asm 1: movdqu <x7=reg128#7,752(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,752(<input_0=%rdi) movdqu % xmm6, 752( % rdi) # qhasm: x0 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 768(<input_0=%rdi),>x0=%xmm6 movdqu 768( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 784 ] # asm 1: movdqu 784(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 784(<input_0=%rdi),>x1=%xmm7 movdqu 784( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 800 ] # asm 1: movdqu 800(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 800(<input_0=%rdi),>x2=%xmm8 movdqu 800( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 816 ] # asm 1: movdqu 816(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 816(<input_0=%rdi),>x3=%xmm9 movdqu 816( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 832 ] # asm 1: movdqu 832(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 832(<input_0=%rdi),>x4=%xmm10 movdqu 832( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 848 ] # asm 1: movdqu 848(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 848(<input_0=%rdi),>x5=%xmm11 movdqu 848( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 864 ] # asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12 movdqu 864( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 880 ] # asm 1: movdqu 880(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 880(<input_0=%rdi),>x7=%xmm13 movdqu 880( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 
vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor 
<v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 
= v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 768 ] = x0 # asm 1: movdqu <x0=reg128#10,768(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,768(<input_0=%rdi) movdqu % xmm9, 768( % rdi) # qhasm: mem128[ input_0 + 784 ] = x1 # asm 1: movdqu <x1=reg128#14,784(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,784(<input_0=%rdi) movdqu % xmm13, 784( % rdi) # qhasm: mem128[ input_0 + 800 ] = x2 # asm 1: movdqu <x2=reg128#15,800(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,800(<input_0=%rdi) movdqu % xmm14, 800( % rdi) # qhasm: mem128[ input_0 + 816 ] = x3 # asm 1: movdqu <x3=reg128#11,816(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,816(<input_0=%rdi) movdqu % xmm10, 816( % rdi) # qhasm: mem128[ input_0 + 832 ] = x4 # asm 1: movdqu <x4=reg128#12,832(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,832(<input_0=%rdi) movdqu % xmm11, 832( % rdi) # qhasm: mem128[ input_0 + 848 ] = x5 # asm 1: movdqu <x5=reg128#9,848(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,848(<input_0=%rdi) movdqu % xmm8, 848( % rdi) # qhasm: mem128[ input_0 + 864 ] = x6 # asm 1: 
movdqu <x6=reg128#13,864(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi) movdqu % xmm12, 864( % rdi) # qhasm: mem128[ input_0 + 880 ] = x7 # asm 1: movdqu <x7=reg128#7,880(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,880(<input_0=%rdi) movdqu % xmm6, 880( % rdi) # qhasm: x0 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 896(<input_0=%rdi),>x0=%xmm6 movdqu 896( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 912 ] # asm 1: movdqu 912(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 912(<input_0=%rdi),>x1=%xmm7 movdqu 912( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 928 ] # asm 1: movdqu 928(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 928(<input_0=%rdi),>x2=%xmm8 movdqu 928( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 944 ] # asm 1: movdqu 944(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 944(<input_0=%rdi),>x3=%xmm9 movdqu 944( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 960 ] # asm 1: movdqu 960(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 960(<input_0=%rdi),>x4=%xmm10 movdqu 960( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 976 ] # asm 1: movdqu 976(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 976(<input_0=%rdi),>x5=%xmm11 movdqu 976( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 992 ] # asm 1: movdqu 992(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 992(<input_0=%rdi),>x6=%xmm12 movdqu 992( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 1008 ] # asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13 movdqu 1008( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x 
v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>x3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>x3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % 
xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#14
# asm 2: psrlq $2,<v01=%xmm13
psrlq $2,%xmm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9
vpor %xmm12,%xmm9,%xmm9
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11
vpor %xmm11,%xmm13,%xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12
vpand %xmm2,%xmm10,%xmm12
# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#1,>v10=reg128#14
# asm 2: vpand <mask2=%xmm2,<x3=%xmm0,>v10=%xmm13
vpand %xmm2,%xmm0,%xmm13
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#14
# asm 2: psllq $2,<v10=%xmm13
psllq $2,%xmm13
# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0
vpand %xmm3,%xmm0,%xmm0
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13
# asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12
vpor %xmm13,%xmm12,%xmm12
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0
vpor %xmm0,%xmm10,%xmm0
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10
vpand %xmm2,%xmm6,%xmm10
# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#14
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm13
vpand %xmm2,%xmm8,%xmm13
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#14
# asm 2: psllq $2,<v10=%xmm13
psllq $2,%xmm13
# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11
# asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10
vpor %xmm13,%xmm10,%xmm10
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#2,>v10=reg128#3
# asm 2: vpand <mask2=%xmm2,<x7=%xmm1,>v10=%xmm2
vpand %xmm2,%xmm1,%xmm2
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#3
# asm 2: psllq $2,<v10=%xmm2
psllq $2,%xmm2
# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1
vpand %xmm3,%xmm1,%xmm1
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#3,<v00=reg128#9,>x5=reg128#3
# asm 2: vpor <v10=%xmm2,<v00=%xmm8,>x5=%xmm2
vpor %xmm2,%xmm8,%xmm2
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1
vpor %xmm1,%xmm7,%xmm1
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4
# asm 2: vpand <mask4=%xmm4,<x0=%xmm9,>v00=%xmm3
vpand %xmm4,%xmm9,%xmm3
# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#13,>v10=reg128#8
# asm 2: vpand <mask4=%xmm4,<x1=%xmm12,>v10=%xmm7
vpand %xmm4,%xmm12,%xmm7
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#8
# asm 2: psllq $1,<v10=%xmm7
psllq $1,%xmm7
# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#10,>v01=reg128#9
# asm 2: vpand <mask5=%xmm5,<x0=%xmm9,>v01=%xmm8
vpand %xmm5,%xmm9,%xmm8
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10
# asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9
vpand %xmm5,%xmm12,%xmm9
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#9
# asm 2: psrlq $1,<v01=%xmm8
psrlq $1,%xmm8
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4
# asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3
vpor %xmm7,%xmm3,%xmm3
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7
vpor %xmm9,%xmm8,%xmm7
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8
vpand %xmm4,%xmm11,%xmm8
# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#1,>v10=reg128#10
# asm 2: vpand <mask4=%xmm4,<x3=%xmm0,>v10=%xmm9
vpand %xmm4,%xmm0,%xmm9
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#10
# asm 2: psllq $1,<v10=%xmm9
psllq $1,%xmm9
# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0
vpand %xmm5,%xmm0,%xmm0
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9
# asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8
vpor %xmm9,%xmm8,%xmm8
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0
vpor %xmm0,%xmm11,%xmm0
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9
vpand %xmm4,%xmm10,%xmm9
# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#3,>v10=reg128#12
# asm 2: vpand <mask4=%xmm4,<x5=%xmm2,>v10=%xmm11
vpand %xmm4,%xmm2,%xmm11
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#12
# asm 2: psllq $1,<v10=%xmm11
psllq $1,%xmm11
# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#11,>v01=reg128#11
# asm 2: vpand <mask5=%xmm5,<x4=%xmm10,>v01=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3
# asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2
vpand %xmm5,%xmm2,%xmm2
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#11
# asm 2: psrlq $1,<v01=%xmm10
psrlq $1,%xmm10
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10
# asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9
vpor %xmm11,%xmm9,%xmm9
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3
# asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2
vpor %xmm2,%xmm10,%xmm2
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#11
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm10
vpand %xmm4,%xmm6,%xmm10
# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#2,>v10=reg128#5
# asm 2: vpand <mask4=%xmm4,<x7=%xmm1,>v10=%xmm4
vpand %xmm4,%xmm1,%xmm4
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#5
# asm 2: psllq $1,<v10=%xmm4
psllq $1,%xmm4
# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1
vpand %xmm5,%xmm1,%xmm1
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#5,<v00=reg128#11,>x6=reg128#5
# asm 2: vpor <v10=%xmm4,<v00=%xmm10,>x6=%xmm4
vpor %xmm4,%xmm10,%xmm4
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm6,>x7=%xmm1
vpor %xmm1,%xmm6,%xmm1
# qhasm: mem128[ input_0 + 896 ] = x0
# asm 1: movdqu <x0=reg128#4,896(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm3,896(<input_0=%rdi)
movdqu %xmm3,896(%rdi)
# qhasm: mem128[ input_0 + 912 ] = x1
# asm 1: movdqu <x1=reg128#8,912(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm7,912(<input_0=%rdi)
movdqu %xmm7,912(%rdi)
# qhasm: mem128[ input_0 + 928 ] = x2
# asm 1: movdqu <x2=reg128#9,928(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm8,928(<input_0=%rdi)
movdqu %xmm8,928(%rdi)
# qhasm: mem128[ input_0 + 944 ] = x3
# asm 1: movdqu <x3=reg128#1,944(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm0,944(<input_0=%rdi)
movdqu %xmm0,944(%rdi)
# qhasm: mem128[ input_0 + 960 ] = x4
# asm 1: movdqu <x4=reg128#10,960(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm9,960(<input_0=%rdi)
movdqu %xmm9,960(%rdi)
# qhasm: mem128[ input_0 + 976 ] = x5
# asm 1: movdqu <x5=reg128#3,976(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm2,976(<input_0=%rdi)
movdqu %xmm2,976(%rdi)
# qhasm: mem128[ input_0 + 992 ] = x6
# asm 1: movdqu <x6=reg128#5,992(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm4,992(<input_0=%rdi)
movdqu %xmm4,992(%rdi)
# qhasm: mem128[ input_0 + 1008 ] = x7
# asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi)
movdqu %xmm1,1008(%rdi)
# qhasm: return
add %r11,%rsp
ret
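The tail above is the final two stages (shift distances 2 and then 1) of a masked bit-interleaving network: each stage takes a pair of registers, keeps the bits selected by a "low" mask in place, and exchanges the bits selected by the complementary "high" mask between the two registers via a shift. A rough scalar C sketch of one such stage follows, with 64-bit words standing in for the 128-bit SSE registers; the function and variable names are illustrative rather than taken from the PQClean sources, and it assumes mask_hi == mask_lo << s so the recombined halves are disjoint.

#include <stdint.h>

/* One butterfly stage of the bit-interleaving network above:
 * bits of x under mask_lo stay put, bits of y under mask_lo move
 * up into x; bits of x under mask_hi move down into y, bits of y
 * under mask_hi stay put. Mirrors the vpand/psllq/psrlq/vpor
 * pattern in the assembly. */
static void butterfly(uint64_t *x, uint64_t *y,
                      uint64_t mask_lo, uint64_t mask_hi, int s)
{
    uint64_t v00 = *x & mask_lo;          /* x bits that stay     */
    uint64_t v10 = (*y & mask_lo) << s;   /* y bits moving into x */
    uint64_t v01 = (*x & mask_hi) >> s;   /* x bits moving into y */
    uint64_t v11 = *y & mask_hi;          /* y bits that stay     */
    *x = v00 | v10;
    *y = v01 | v11;
}

Running such stages with successively smaller shift distances is the standard way to transpose a bit matrix held across several registers; the eight movdqu stores then write the permuted registers back at offsets 896 through 1008.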
mktmansour/MKT-KSA-Geolocation-Security
69,549
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896/avx2/vec256_mul_asm.S
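The content below is the bitsliced multiplier for the mceliece460896 parameter set: the thirteen 256-bit limbs a0..a12 each hold one coefficient bit-plane of a batch of GF(2^13) elements, the partial products r0..r24 are accumulated row by row with vpand/vpxor, and each high limb rk with k >= 13 is folded into r(k-13+4), r(k-13+3), r(k-13+1) and r(k-13), i.e. reduction modulo x^13 + x^4 + x^3 + x + 1. A scalar C model of the same computation follows; the names are illustrative and 64-bit words stand in for the 256-bit limbs, so this is a sketch of the idea rather than the PQClean API.

#include <stdint.h>

/* Bitsliced GF(2^13) multiply: AND of bit-planes is a coefficient
 * product over GF(2), XOR is addition. buf[i+j] collects column
 * i+j of the schoolbook product, then degrees 24..13 are folded
 * down using x^13 = x^4 + x^3 + x + 1, matching the r24..r13
 * folds in the assembly below. */
static void vec_mul_model(uint64_t h[13],
                          const uint64_t f[13], const uint64_t g[13])
{
    uint64_t buf[25] = {0};

    for (int i = 0; i < 13; i++)          /* schoolbook product   */
        for (int j = 0; j < 13; j++)
            buf[i + j] ^= f[i] & g[j];

    for (int k = 24; k >= 13; k--) {      /* reduce high columns  */
        buf[k - 13 + 4] ^= buf[k];
        buf[k - 13 + 3] ^= buf[k];
        buf[k - 13 + 1] ^= buf[k];
        buf[k - 13] ^= buf[k];
    }

    for (int i = 0; i < 13; i++)
        h[i] = buf[i];
}

The assembly interleaves the folds with the accumulation (r24 is folded into r15, r14, r12 and r11 as soon as the a12 row is complete) so that everything fits in sixteen ymm registers; because XOR is associative and commutative, the result is the same as folding once at the end as in the model above.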
#include "namespace.h" #define vec256_mul_asm CRYPTO_NAMESPACE(vec256_mul_asm) #define _vec256_mul_asm _CRYPTO_NAMESPACE(vec256_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_mul_asm .p2align 5 .global _vec256_mul_asm .global vec256_mul_asm _vec256_mul_asm: vec256_mul_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#3,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm2,384(<input_0=%rdi) vmovupd % ymm2, 384( % rdi) # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#2,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm1,352(<input_0=%rdi) vmovupd % ymm1, 352( % rdi) # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: 
vmovupd <r9=reg256#13,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm12,288(<input_0=%rdi) vmovupd % ymm12, 288( % rdi) # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#12,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm11,256(<input_0=%rdi) vmovupd % ymm11, 256( % rdi) # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#11,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm10,224(<input_0=%rdi) vmovupd % ymm10, 224( % rdi) # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#10,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm9,192(<input_0=%rdi) vmovupd % ymm9, 192( % rdi) # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#9,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm8,160(<input_0=%rdi) vmovupd % ymm8, 160( % rdi) # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#8,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm7,128(<input_0=%rdi) vmovupd % ymm7, 128( % rdi) # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#7,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm6,96(<input_0=%rdi) vmovupd % ymm6, 96( % rdi) # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#6,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm5,64(<input_0=%rdi) vmovupd % ymm5, 64( % rdi) # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#5,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm4,32(<input_0=%rdi) vmovupd % ymm4, 32( % rdi) # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#4,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm3,0(<input_0=%rdi) vmovupd % ymm3, 0( % rdi) # qhasm: return add % r11, % rsp ret
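# Note (added commentary, not part of the generated qhasm output): the routine
# ending above is the tail of a bitsliced multiplication in GF(2^13). Each ymm
# register holds one bit position of four independent 64-way bitsliced field
# elements, so vpand acts as a bitsliced GF(2) multiply and vpxor as a bitsliced
# add. The a2/a1/a0 passes finish a 13x13 schoolbook product r0..r24, and the
# interleaved folds visible above (r14 -> r5,r4,r2,r1 and r13 -> r4,r3,r1,r0)
# reduce modulo what appears to be the field polynomial y^13 + y^4 + y^3 + y + 1.
# A minimal C reference sketch of the same computation, written here for
# illustration only (the function name and the modulus are inferred from the
# fold pattern, not taken from this file):
#
#   /* requires <stdint.h> */
#   void vec_mul_ref(uint64_t h[13], const uint64_t f[13], const uint64_t g[13]) {
#       uint64_t r[25] = {0};
#       for (int i = 0; i < 13; i++)            /* schoolbook product over GF(2) */
#           for (int j = 0; j < 13; j++)
#               r[i + j] ^= f[i] & g[j];        /* AND = bitsliced GF(2) multiply */
#       for (int i = 24; i >= 13; i--) {        /* fold y^13 = y^4 + y^3 + y + 1 */
#           r[i - 9]  ^= r[i];                  /* y^4 term */
#           r[i - 10] ^= r[i];                  /* y^3 term */
#           r[i - 12] ^= r[i];                  /* y^1 term */
#           r[i - 13] ^= r[i];                  /* y^0 term */
#       }
#       for (int i = 0; i < 13; i++) h[i] = r[i];
#   }
#
# The assembly computes the same thing on 256-bit lanes (four 64-bit slices at
# once) and folds each high coefficient as soon as it is complete, so r13 and
# r14 never occupy extra registers.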
mktmansour/MKT-KSA-Geolocation-Security
264,233
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896/avx2/transpose_64x256_sp_asm.S
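# Added commentary (not part of the original source): the file below implements
# transpose_64x256_sp_asm, which transposes a 64x256 bit matrix in place. It
# processes the 64 rows in groups of eight taken at stride 8 (byte offsets
# +0,+256,...,+1792, then +32,+288,..., and so on), running each group through
# three masked "butterfly" passes with 32-, 16- and 8-bit swap distances using
# the MASK5/MASK4/MASK3 constant pairs; the finer passes with MASK2/MASK1/MASK0
# presumably follow later in the file, beyond the portion shown here. A scalar
# model of one butterfly pass is given in the inline note further down.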
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x256_sp_asm CRYPTO_NAMESPACE(transpose_64x256_sp_asm) #define _transpose_64x256_sp_asm _CRYPTO_NAMESPACE(transpose_64x256_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 x0 # qhasm: reg256 x1 # qhasm: reg256 x2 # qhasm: reg256 x3 # qhasm: reg256 x4 # qhasm: reg256 x5 # qhasm: reg256 x6 # qhasm: reg256 x7 # qhasm: reg256 t0 # qhasm: reg256 t1 # qhasm: reg256 v00 # qhasm: reg256 v01 # qhasm: reg256 v10 # qhasm: reg256 v11 # qhasm: reg256 mask0 # qhasm: reg256 mask1 # qhasm: reg256 mask2 # qhasm: reg256 mask3 # qhasm: reg256 mask4 # qhasm: reg256 mask5 # qhasm: enter transpose_64x256_sp_asm .p2align 5 .global _transpose_64x256_sp_asm .global transpose_64x256_sp_asm _transpose_64x256_sp_asm: transpose_64x256_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem256[ MASK5_0 ] # asm 1: vmovapd MASK5_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK5_0(%rip),>mask0=%ymm0 vmovapd MASK5_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK5_1 ] # asm 1: vmovapd MASK5_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK5_1(%rip),>mask1=%ymm1 vmovapd MASK5_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK4_0 ] # asm 1: vmovapd MASK4_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK4_0(%rip),>mask2=%ymm2 vmovapd MASK4_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK4_1 ] # asm 1: vmovapd MASK4_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK4_1(%rip),>mask3=%ymm3 vmovapd MASK4_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK3_0 ] # asm 1: vmovapd MASK3_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK3_0(%rip),>mask4=%ymm4 vmovapd MASK3_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK3_1 ] # asm 1: vmovapd MASK3_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK3_1(%rip),>mask5=%ymm5 vmovapd MASK3_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 256(<input_0=%rdi),>x1=%ymm7 vmovupd 256( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 512 ] 
# asm 1: vmovupd 512(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 512(<input_0=%rdi),>x2=%ymm8 vmovupd 512( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 768 ] # asm 1: vmovupd 768(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 768(<input_0=%rdi),>x3=%ymm9 vmovupd 768( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1024(<input_0=%rdi),>x4=%ymm10 vmovupd 1024( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1280 ] # asm 1: vmovupd 1280(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1280(<input_0=%rdi),>x5=%ymm11 vmovupd 1280( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1536(<input_0=%rdi),>x6=%ymm12 vmovupd 1536( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1792(<input_0=%rdi),>x7=%ymm13 vmovupd 1792( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 
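# Added commentary (not part of the generated output): every six-instruction
# group in this function is one "butterfly" (delta-swap) step of the bit-matrix
# transpose. For a row pair (x, y) and swap distance s it computes
#     x' = (x & mask_lo) | (y << s)        /* v00 | v10 */
#     y' = (x >> s) | (y & mask_hi)        /* v01 | v11 */
# i.e. the high s-bit half of each 2s-bit unit of x is exchanged with the low
# half of the corresponding unit of y. A minimal scalar model of the s = 32
# case on one 64-bit lane, for illustration only (the vector code gets the
# post-shift masking for free because the element width of vpsllq/vpslld/vpsllw
# always equals 2s):
#
#   /* requires <stdint.h> */
#   static void butterfly32(uint64_t *x, uint64_t *y) {
#       uint64_t lo = 0x00000000FFFFFFFFULL;      /* MASK5_0 bit pattern */
#       uint64_t nx = (*x & lo) | (*y << 32);     /* low half of x, low half of y */
#       uint64_t ny = (*x >> 32) | (*y & ~lo);    /* high half of x, high half of y */
#       *x = nx;
#       *y = ny;
#   }
#
# The three passes pair rows at distance 4 (x0,x4),(x1,x5),... with s = 32,
# distance 2 (x0,x2),(x1,x3),... with s = 16, and distance 1 (x0,x1),(x2,x3),...
# with s = 8.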
# qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 256 ] = x1 # asm 1: vmovupd <x1=reg256#14,256(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,256(<input_0=%rdi) vmovupd % ymm13, 256( % rdi) # qhasm: mem256[ input_0 + 512 ] = x2 # asm 1: vmovupd <x2=reg256#15,512(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,512(<input_0=%rdi) vmovupd % ymm14, 512( % rdi) # qhasm: mem256[ input_0 + 768 ] = x3 # asm 1: vmovupd <x3=reg256#11,768(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,768(<input_0=%rdi) vmovupd % ymm10, 768( % rdi) # qhasm: mem256[ input_0 + 1024 ] = x4 # asm 1: vmovupd <x4=reg256#12,1024(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1024(<input_0=%rdi) vmovupd % ymm11, 1024( % rdi) # qhasm: mem256[ input_0 + 1280 ] = x5 # asm 1: vmovupd <x5=reg256#9,1280(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1280(<input_0=%rdi) vmovupd % ymm8, 1280( % rdi) # qhasm: mem256[ input_0 + 1536 ] = x6 # asm 1: vmovupd <x6=reg256#13,1536(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1536(<input_0=%rdi) vmovupd % ymm12, 1536( % rdi) # qhasm: mem256[ input_0 + 1792 ] = x7 # asm 1: vmovupd <x7=reg256#7,1792(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1792(<input_0=%rdi) vmovupd % ymm6, 1792( % rdi) # qhasm: x0 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6 vmovupd 32( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 544 ] # asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8 vmovupd 544( % rdi), % ymm8 # qhasm: x3 = mem256[ 
input_0 + 800 ] # asm 1: vmovupd 800(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 800(<input_0=%rdi),>x3=%ymm9 vmovupd 800( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1056 ] # asm 1: vmovupd 1056(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1056(<input_0=%rdi),>x4=%ymm10 vmovupd 1056( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1312 ] # asm 1: vmovupd 1312(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1312(<input_0=%rdi),>x5=%ymm11 vmovupd 1312( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1568(<input_0=%rdi),>x6=%ymm12 vmovupd 1568( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1824(<input_0=%rdi),>x7=%ymm13 vmovupd 1824( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: 
vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 
2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x 
v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 32 ] = x0 # asm 1: vmovupd <x0=reg256#10,32(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,32(<input_0=%rdi) vmovupd % ymm9, 32( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 544 ] = x2 # asm 1: vmovupd <x2=reg256#15,544(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,544(<input_0=%rdi) vmovupd % ymm14, 544( % rdi) # qhasm: mem256[ input_0 + 800 ] = x3 # asm 1: vmovupd <x3=reg256#11,800(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,800(<input_0=%rdi) vmovupd % ymm10, 800( % rdi) # qhasm: mem256[ input_0 + 1056 ] = x4 # asm 1: vmovupd <x4=reg256#12,1056(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1056(<input_0=%rdi) vmovupd % ymm11, 1056( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x5 # asm 1: vmovupd <x5=reg256#9,1312(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1312(<input_0=%rdi) vmovupd % ymm8, 1312( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x6 # asm 1: vmovupd <x6=reg256#13,1568(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1568(<input_0=%rdi) vmovupd % ymm12, 1568( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x7 # asm 1: vmovupd <x7=reg256#7,1824(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1824(<input_0=%rdi) vmovupd % ymm6, 1824( % rdi) # qhasm: x0 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 64(<input_0=%rdi),>x0=%ymm6 vmovupd 64( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 320(<input_0=%rdi),>x1=%ymm7 vmovupd 320( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 832 ] # asm 1: vmovupd 832(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 832(<input_0=%rdi),>x3=%ymm9 vmovupd 832( % rdi), % ymm9 # qhasm: 
x4 = mem256[ input_0 + 1088 ] # asm 1: vmovupd 1088(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1088(<input_0=%rdi),>x4=%ymm10 vmovupd 1088( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1344 ] # asm 1: vmovupd 1344(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1344(<input_0=%rdi),>x5=%ymm11 vmovupd 1344( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1600(<input_0=%rdi),>x6=%ymm12 vmovupd 1600( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1856(<input_0=%rdi),>x7=%ymm13 vmovupd 1856( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 
1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = 
x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 64 ] = x0 # asm 1: vmovupd <x0=reg256#10,64(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,64(<input_0=%rdi) vmovupd % ymm9, 64( % rdi) # qhasm: mem256[ input_0 + 320 ] = x1 # asm 1: vmovupd <x1=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 576 ] = x2 # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi) vmovupd % ymm14, 576( % rdi) # qhasm: mem256[ input_0 + 832 ] = x3 # asm 1: vmovupd <x3=reg256#11,832(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,832(<input_0=%rdi) vmovupd % ymm10, 832( % rdi) # qhasm: mem256[ input_0 + 1088 ] = x4 # asm 1: vmovupd <x4=reg256#12,1088(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1088(<input_0=%rdi) vmovupd % ymm11, 1088( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x5 # asm 1: vmovupd <x5=reg256#9,1344(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1344(<input_0=%rdi) vmovupd % ymm8, 1344( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x6 # asm 1: vmovupd <x6=reg256#13,1600(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1600(<input_0=%rdi) vmovupd % ymm12, 1600( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x7 # asm 1: vmovupd <x7=reg256#7,1856(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1856(<input_0=%rdi) vmovupd % ymm6, 1856( % rdi) # qhasm: x0 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6 vmovupd 96( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7 vmovupd 352( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8 vmovupd 608( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 864 ] # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9 vmovupd 864( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1120 ] # asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10 vmovupd 1120( % rdi), % 
# qhasm: x0 = mem256[ input_0 + 96 ]
# asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6
vmovupd 96(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 352 ]
# asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7
vmovupd 352(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 608 ]
# asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8
vmovupd 608(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 864 ]
# asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9
vmovupd 864(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 1120 ]
# asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10
vmovupd 1120(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 1376 ]
# asm 1: vmovupd 1376(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1376(<input_0=%rdi),>x5=%ymm11
vmovupd 1376(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 1632 ]
# asm 1: vmovupd 1632(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1632(<input_0=%rdi),>x6=%ymm12
vmovupd 1632(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 1888 ]
# asm 1: vmovupd 1888(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1888(<input_0=%rdi),>x7=%ymm13
vmovupd 1888(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6,%ymm0,%ymm14

# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32,%ymm10,%ymm15

# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32,%ymm6,%ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10,%ymm1,%ymm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7,%ymm0,%ymm10

# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32,%ymm11,%ymm15

# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32,%ymm7,%ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11,%ymm1,%ymm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10,%ymm15,%ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8,%ymm0,%ymm11

# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32,%ymm12,%ymm15

# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32,%ymm8,%ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12,%ymm1,%ymm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9,%ymm0,%ymm12

# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32,%ymm13,%ymm15

# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32,%ymm9,%ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13,%ymm1,%ymm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9,%ymm13,%ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14,%ymm2,%ymm13

# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16,%ymm11,%ymm15

# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16,%ymm14,%ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11,%ymm3,%ymm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13,%ymm15,%ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10,%ymm2,%ymm14

# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16,%ymm12,%ymm15

# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16,%ymm10,%ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12,%ymm3,%ymm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10,%ymm12,%ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6,%ymm2,%ymm12

# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16,%ymm8,%ymm15

# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16,%ymm6,%ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8,%ymm3,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7,%ymm2,%ymm8

# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16,%ymm9,%ymm15

# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16,%ymm7,%ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9,%ymm3,%ymm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8,%ymm15,%ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7,%ymm9,%ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13,%ymm4,%ymm9

# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8,%ymm14,%ymm15

# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8,%ymm13,%ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14,%ymm5,%ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9,%ymm15,%ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13,%ymm14,%ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11,%ymm4,%ymm14

# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8,%ymm10,%ymm15

# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8,%ymm11,%ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10,%ymm5,%ymm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11,%ymm10,%ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12,%ymm4,%ymm11

# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8,%ymm8,%ymm15

# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8,%ymm12,%ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8,%ymm5,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12,%ymm8,%ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6,%ymm4,%ymm12

# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8,%ymm7,%ymm15

# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8,%ymm6,%ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7,%ymm5,%ymm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6,%ymm7,%ymm6

# qhasm: mem256[ input_0 + 96 ] = x0
# asm 1: vmovupd <x0=reg256#10,96(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,96(<input_0=%rdi)
vmovupd %ymm9,96(%rdi)

# qhasm: mem256[ input_0 + 352 ] = x1
# asm 1: vmovupd <x1=reg256#14,352(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,352(<input_0=%rdi)
vmovupd %ymm13,352(%rdi)

# qhasm: mem256[ input_0 + 608 ] = x2
# asm 1: vmovupd <x2=reg256#15,608(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,608(<input_0=%rdi)
vmovupd %ymm14,608(%rdi)

# qhasm: mem256[ input_0 + 864 ] = x3
# asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi)
vmovupd %ymm10,864(%rdi)

# qhasm: mem256[ input_0 + 1120 ] = x4
# asm 1: vmovupd <x4=reg256#12,1120(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1120(<input_0=%rdi)
vmovupd %ymm11,1120(%rdi)

# qhasm: mem256[ input_0 + 1376 ] = x5
# asm 1: vmovupd <x5=reg256#9,1376(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1376(<input_0=%rdi)
vmovupd %ymm8,1376(%rdi)

# qhasm: mem256[ input_0 + 1632 ] = x6
# asm 1: vmovupd <x6=reg256#13,1632(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1632(<input_0=%rdi)
vmovupd %ymm12,1632(%rdi)

# qhasm: mem256[ input_0 + 1888 ] = x7
# asm 1: vmovupd <x7=reg256#7,1888(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1888(<input_0=%rdi)
vmovupd %ymm6,1888(%rdi)
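
# The same load/interleave/store block now repeats for the eight rows
# based at byte offset 128 (row stride 256); only the memory offsets
# change, the register allocation is identical.
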
# qhasm: x0 = mem256[ input_0 + 128 ]
# asm 1: vmovupd 128(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 128(<input_0=%rdi),>x0=%ymm6
vmovupd 128(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 384 ]
# asm 1: vmovupd 384(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 384(<input_0=%rdi),>x1=%ymm7
vmovupd 384(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 640 ]
# asm 1: vmovupd 640(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 640(<input_0=%rdi),>x2=%ymm8
vmovupd 640(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 896 ]
# asm 1: vmovupd 896(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 896(<input_0=%rdi),>x3=%ymm9
vmovupd 896(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 1152 ]
# asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10
vmovupd 1152(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 1408 ]
# asm 1: vmovupd 1408(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1408(<input_0=%rdi),>x5=%ymm11
vmovupd 1408(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 1664 ]
# asm 1: vmovupd 1664(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1664(<input_0=%rdi),>x6=%ymm12
vmovupd 1664(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 1920 ]
# asm 1: vmovupd 1920(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1920(<input_0=%rdi),>x7=%ymm13
vmovupd 1920(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6,%ymm0,%ymm14

# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32,%ymm10,%ymm15

# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32,%ymm6,%ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10,%ymm1,%ymm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7,%ymm0,%ymm10

# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32,%ymm11,%ymm15

# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32,%ymm7,%ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11,%ymm1,%ymm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10,%ymm15,%ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8,%ymm0,%ymm11

# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32,%ymm12,%ymm15

# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32,%ymm8,%ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12,%ymm1,%ymm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9,%ymm0,%ymm12

# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32,%ymm13,%ymm15

# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32,%ymm9,%ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13,%ymm1,%ymm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9,%ymm13,%ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14,%ymm2,%ymm13

# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16,%ymm11,%ymm15

# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16,%ymm14,%ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11,%ymm3,%ymm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13,%ymm15,%ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10,%ymm2,%ymm14

# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16,%ymm12,%ymm15

# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16,%ymm10,%ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12,%ymm3,%ymm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10,%ymm12,%ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6,%ymm2,%ymm12

# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16,%ymm8,%ymm15

# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16,%ymm6,%ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8,%ymm3,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7,%ymm2,%ymm8

# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16,%ymm9,%ymm15

# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16,%ymm7,%ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9,%ymm3,%ymm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8,%ymm15,%ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7,%ymm9,%ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13,%ymm4,%ymm9

# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8,%ymm14,%ymm15

# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8,%ymm13,%ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14,%ymm5,%ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9,%ymm15,%ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13,%ymm14,%ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11,%ymm4,%ymm14

# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8,%ymm10,%ymm15

# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8,%ymm11,%ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10,%ymm5,%ymm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11,%ymm10,%ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12,%ymm4,%ymm11

# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8,%ymm8,%ymm15

# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8,%ymm12,%ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8,%ymm5,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12,%ymm8,%ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6,%ymm4,%ymm12

# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8,%ymm7,%ymm15

# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8,%ymm6,%ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7,%ymm5,%ymm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6,%ymm7,%ymm6

# qhasm: mem256[ input_0 + 128 ] = x0
# asm 1: vmovupd <x0=reg256#10,128(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,128(<input_0=%rdi)
vmovupd %ymm9,128(%rdi)

# qhasm: mem256[ input_0 + 384 ] = x1
# asm 1: vmovupd <x1=reg256#14,384(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,384(<input_0=%rdi)
vmovupd %ymm13,384(%rdi)

# qhasm: mem256[ input_0 + 640 ] = x2
# asm 1: vmovupd <x2=reg256#15,640(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,640(<input_0=%rdi)
vmovupd %ymm14,640(%rdi)

# qhasm: mem256[ input_0 + 896 ] = x3
# asm 1: vmovupd <x3=reg256#11,896(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,896(<input_0=%rdi)
vmovupd %ymm10,896(%rdi)

# qhasm: mem256[ input_0 + 1152 ] = x4
# asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi)
vmovupd %ymm11,1152(%rdi)

# qhasm: mem256[ input_0 + 1408 ] = x5
# asm 1: vmovupd <x5=reg256#9,1408(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1408(<input_0=%rdi)
vmovupd %ymm8,1408(%rdi)

# qhasm: mem256[ input_0 + 1664 ] = x6
# asm 1: vmovupd <x6=reg256#13,1664(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1664(<input_0=%rdi)
vmovupd %ymm12,1664(%rdi)

# qhasm: mem256[ input_0 + 1920 ] = x7
# asm 1: vmovupd <x7=reg256#7,1920(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1920(<input_0=%rdi)
vmovupd %ymm6,1920(%rdi)
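
# Next column: base offset 160. Within each block the three double-stages
# run in a fixed order: quadword halves (vpsllq/vpsrlq $32 with
# mask0/mask1), then 16-bit groups (vpslld/vpsrld $16 with mask2/mask3),
# then bytes (vpsllw/vpsrlw $8 with mask4/mask5).
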
# qhasm: x0 = mem256[ input_0 + 160 ]
# asm 1: vmovupd 160(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 160(<input_0=%rdi),>x0=%ymm6
vmovupd 160(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 416 ]
# asm 1: vmovupd 416(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 416(<input_0=%rdi),>x1=%ymm7
vmovupd 416(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 672 ]
# asm 1: vmovupd 672(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 672(<input_0=%rdi),>x2=%ymm8
vmovupd 672(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 928 ]
# asm 1: vmovupd 928(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 928(<input_0=%rdi),>x3=%ymm9
vmovupd 928(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 1184 ]
# asm 1: vmovupd 1184(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1184(<input_0=%rdi),>x4=%ymm10
vmovupd 1184(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 1440 ]
# asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11
vmovupd 1440(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 1696 ]
# asm 1: vmovupd 1696(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1696(<input_0=%rdi),>x6=%ymm12
vmovupd 1696(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 1952 ]
# asm 1: vmovupd 1952(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1952(<input_0=%rdi),>x7=%ymm13
vmovupd 1952(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6,%ymm0,%ymm14

# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32,%ymm10,%ymm15

# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32,%ymm6,%ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10,%ymm1,%ymm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7,%ymm0,%ymm10

# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32,%ymm11,%ymm15

# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32,%ymm7,%ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11,%ymm1,%ymm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10,%ymm15,%ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8,%ymm0,%ymm11

# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32,%ymm12,%ymm15

# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32,%ymm8,%ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12,%ymm1,%ymm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9,%ymm0,%ymm12

# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32,%ymm13,%ymm15

# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32,%ymm9,%ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13,%ymm1,%ymm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9,%ymm13,%ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14,%ymm2,%ymm13

# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16,%ymm11,%ymm15

# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16,%ymm14,%ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11,%ymm3,%ymm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13,%ymm15,%ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10,%ymm2,%ymm14

# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16,%ymm12,%ymm15

# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16,%ymm10,%ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12,%ymm3,%ymm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10,%ymm12,%ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6,%ymm2,%ymm12

# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16,%ymm8,%ymm15

# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16,%ymm6,%ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8,%ymm3,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7,%ymm2,%ymm8

# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16,%ymm9,%ymm15

# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16,%ymm7,%ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9,%ymm3,%ymm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8,%ymm15,%ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7,%ymm9,%ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13,%ymm4,%ymm9

# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8,%ymm14,%ymm15

# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8,%ymm13,%ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14,%ymm5,%ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9,%ymm15,%ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13,%ymm14,%ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11,%ymm4,%ymm14

# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8,%ymm10,%ymm15

# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8,%ymm11,%ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10,%ymm5,%ymm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11,%ymm10,%ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12,%ymm4,%ymm11

# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8,%ymm8,%ymm15

# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8,%ymm12,%ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8,%ymm5,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12,%ymm8,%ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6,%ymm4,%ymm12

# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8,%ymm7,%ymm15

# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8,%ymm6,%ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7,%ymm5,%ymm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6,%ymm7,%ymm6

# qhasm: mem256[ input_0 + 160 ] = x0
# asm 1: vmovupd <x0=reg256#10,160(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,160(<input_0=%rdi)
vmovupd %ymm9,160(%rdi)

# qhasm: mem256[ input_0 + 416 ] = x1
# asm 1: vmovupd <x1=reg256#14,416(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,416(<input_0=%rdi)
vmovupd %ymm13,416(%rdi)

# qhasm: mem256[ input_0 + 672 ] = x2
# asm 1: vmovupd <x2=reg256#15,672(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,672(<input_0=%rdi)
vmovupd %ymm14,672(%rdi)

# qhasm: mem256[ input_0 + 928 ] = x3
# asm 1: vmovupd <x3=reg256#11,928(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,928(<input_0=%rdi)
vmovupd %ymm10,928(%rdi)

# qhasm: mem256[ input_0 + 1184 ] = x4
# asm 1: vmovupd <x4=reg256#12,1184(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1184(<input_0=%rdi)
vmovupd %ymm11,1184(%rdi)

# qhasm: mem256[ input_0 + 1440 ] = x5
# asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi)
vmovupd %ymm8,1440(%rdi)

# qhasm: mem256[ input_0 + 1696 ] = x6
# asm 1: vmovupd <x6=reg256#13,1696(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1696(<input_0=%rdi)
vmovupd %ymm12,1696(%rdi)

# qhasm: mem256[ input_0 + 1952 ] = x7
# asm 1: vmovupd <x7=reg256#7,1952(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1952(<input_0=%rdi)
vmovupd %ymm6,1952(%rdi)
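
# Base offset 192. Throughout these blocks %ymm0-%ymm5 hold mask0-mask5
# and %ymm6-%ymm15 serve as the working set for x0-x7 plus the
# v00/v01/v10/v11 temporaries (see the reg256 numbering in the asm 1
# comments above).
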
# qhasm: x0 = mem256[ input_0 + 192 ]
# asm 1: vmovupd 192(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 192(<input_0=%rdi),>x0=%ymm6
vmovupd 192(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 448 ]
# asm 1: vmovupd 448(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 448(<input_0=%rdi),>x1=%ymm7
vmovupd 448(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 704 ]
# asm 1: vmovupd 704(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 704(<input_0=%rdi),>x2=%ymm8
vmovupd 704(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 960 ]
# asm 1: vmovupd 960(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 960(<input_0=%rdi),>x3=%ymm9
vmovupd 960(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 1216 ]
# asm 1: vmovupd 1216(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1216(<input_0=%rdi),>x4=%ymm10
vmovupd 1216(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 1472 ]
# asm 1: vmovupd 1472(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1472(<input_0=%rdi),>x5=%ymm11
vmovupd 1472(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 1728 ]
# asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12
vmovupd 1728(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 1984 ]
# asm 1: vmovupd 1984(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1984(<input_0=%rdi),>x7=%ymm13
vmovupd 1984(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6,%ymm0,%ymm14

# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32,%ymm10,%ymm15

# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32,%ymm6,%ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10,%ymm1,%ymm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7,%ymm0,%ymm10

# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32,%ymm11,%ymm15

# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32,%ymm7,%ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11,%ymm1,%ymm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10,%ymm15,%ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8,%ymm0,%ymm11

# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32,%ymm12,%ymm15

# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32,%ymm8,%ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12,%ymm1,%ymm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9,%ymm0,%ymm12

# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32,%ymm13,%ymm15

# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32,%ymm9,%ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13,%ymm1,%ymm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9,%ymm13,%ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14,%ymm2,%ymm13

# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16,%ymm11,%ymm15

# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16,%ymm14,%ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11,%ymm3,%ymm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13,%ymm15,%ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10,%ymm2,%ymm14

# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16,%ymm12,%ymm15

# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16,%ymm10,%ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12,%ymm3,%ymm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10,%ymm12,%ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6,%ymm2,%ymm12

# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16,%ymm8,%ymm15

# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16,%ymm6,%ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8,%ymm3,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7,%ymm2,%ymm8

# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16,%ymm9,%ymm15

# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16,%ymm7,%ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9,%ymm3,%ymm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8,%ymm15,%ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7,%ymm9,%ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13,%ymm4,%ymm9

# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8,%ymm14,%ymm15

# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8,%ymm13,%ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14,%ymm5,%ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9,%ymm15,%ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13,%ymm14,%ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11,%ymm4,%ymm14

# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8,%ymm10,%ymm15

# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8,%ymm11,%ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10,%ymm5,%ymm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11,%ymm10,%ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12,%ymm4,%ymm11

# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8,%ymm8,%ymm15

# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8,%ymm12,%ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8,%ymm5,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12,%ymm8,%ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6,%ymm4,%ymm12

# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8,%ymm7,%ymm15

# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8,%ymm6,%ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7,%ymm5,%ymm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6,%ymm7,%ymm6

# qhasm: mem256[ input_0 + 192 ] = x0
# asm 1: vmovupd <x0=reg256#10,192(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,192(<input_0=%rdi)
vmovupd %ymm9,192(%rdi)

# qhasm: mem256[ input_0 + 448 ] = x1
# asm 1: vmovupd <x1=reg256#14,448(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,448(<input_0=%rdi)
vmovupd %ymm13,448(%rdi)

# qhasm: mem256[ input_0 + 704 ] = x2
# asm 1: vmovupd <x2=reg256#15,704(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,704(<input_0=%rdi)
vmovupd %ymm14,704(%rdi)

# qhasm: mem256[ input_0 + 960 ] = x3
# asm 1: vmovupd <x3=reg256#11,960(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,960(<input_0=%rdi)
vmovupd %ymm10,960(%rdi)

# qhasm: mem256[ input_0 + 1216 ] = x4
# asm 1: vmovupd <x4=reg256#12,1216(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1216(<input_0=%rdi)
vmovupd %ymm11,1216(%rdi)

# qhasm: mem256[ input_0 + 1472 ] = x5
# asm 1: vmovupd <x5=reg256#9,1472(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1472(<input_0=%rdi)
vmovupd %ymm8,1472(%rdi)

# qhasm: mem256[ input_0 + 1728 ] = x6
# asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi)
vmovupd %ymm12,1728(%rdi)

# qhasm: mem256[ input_0 + 1984 ] = x7
# asm 1: vmovupd <x7=reg256#7,1984(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1984(<input_0=%rdi)
vmovupd %ymm6,1984(%rdi)
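
# Final column (base offset 224). Since the masks are not needed after
# this block, the generated code evidently starts reusing the mask
# registers %ymm0-%ymm5 as scratch here (e.g. v00=reg256#1 below
# overwrites mask0).
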
# qhasm: x0 = mem256[ input_0 + 224 ]
# asm 1: vmovupd 224(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 224(<input_0=%rdi),>x0=%ymm6
vmovupd 224(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 480 ]
# asm 1: vmovupd 480(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 480(<input_0=%rdi),>x1=%ymm7
vmovupd 480(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 736 ]
# asm 1: vmovupd 736(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 736(<input_0=%rdi),>x2=%ymm8
vmovupd 736(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 992 ]
# asm 1: vmovupd 992(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 992(<input_0=%rdi),>x3=%ymm9
vmovupd 992(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 1248 ]
# asm 1: vmovupd 1248(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1248(<input_0=%rdi),>x4=%ymm10
vmovupd 1248(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 1504 ]
# asm 1: vmovupd 1504(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1504(<input_0=%rdi),>x5=%ymm11
vmovupd 1504(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 1760 ]
# asm 1: vmovupd 1760(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1760(<input_0=%rdi),>x6=%ymm12
vmovupd 1760(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 2016 ]
# asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13
vmovupd 2016(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6,%ymm0,%ymm14

# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32,%ymm10,%ymm15

# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32,%ymm6,%ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10,%ymm1,%ymm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7,%ymm0,%ymm10

# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32,%ymm11,%ymm15

# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32,%ymm7,%ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11,%ymm1,%ymm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10,%ymm15,%ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8,%ymm0,%ymm11

# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32,%ymm12,%ymm15

# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32,%ymm8,%ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12,%ymm1,%ymm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#1
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm0
vpand %ymm9,%ymm0,%ymm0

# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#13
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm12
vpsllq $32,%ymm13,%ymm12

# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32,%ymm9,%ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1
vpand %ymm13,%ymm1,%ymm1

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#1,<v10=reg256#13,>x3=reg256#1
# asm 2: vpor <v00=%ymm0,<v10=%ymm12,>x3=%ymm0
vpor %ymm0,%ymm12,%ymm0

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1
vpor %ymm9,%ymm1,%ymm1

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9
vpand %ymm14,%ymm2,%ymm9

# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#13
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm12
vpslld $16,%ymm11,%ymm12

# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#14
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm13
vpsrld $16,%ymm14,%ymm13

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11,%ymm3,%ymm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9
vpor %ymm9,%ymm12,%ymm9

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11
vpor %ymm13,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12
vpand %ymm10,%ymm2,%ymm12

# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#1,>v10=reg256#14
# asm 2: vpslld $16,<x3=%ymm0,>v10=%ymm13
vpslld $16,%ymm0,%ymm13

# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16,%ymm10,%ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0
vpand %ymm0,%ymm3,%ymm0

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12
vpor %ymm12,%ymm13,%ymm12

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0
vpor %ymm10,%ymm0,%ymm0

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10
vpand %ymm6,%ymm2,%ymm10

# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#14
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm13
vpslld $16,%ymm8,%ymm13

# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16,%ymm6,%ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8,%ymm3,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10
vpor %ymm10,%ymm13,%ymm10

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#3
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm2
vpand %ymm7,%ymm2,%ymm2

# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#2,>v10=reg256#9
# asm 2: vpslld $16,<x7=%ymm1,>v10=%ymm8
vpslld $16,%ymm1,%ymm8

# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16,%ymm7,%ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1
vpand %ymm1,%ymm3,%ymm1

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#3,<v10=reg256#9,>x5=reg256#3
# asm 2: vpor <v00=%ymm2,<v10=%ymm8,>x5=%ymm2
vpor %ymm2,%ymm8,%ymm2

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1
vpor %ymm7,%ymm1,%ymm1

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4
# asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3
vpand %ymm9,%ymm4,%ymm3

# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#13,>v10=reg256#8
# asm 2: vpsllw $8,<x1=%ymm12,>v10=%ymm7
vpsllw $8,%ymm12,%ymm7

# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#10,>v01=reg256#9
# asm 2: vpsrlw $8,<x0=%ymm9,>v01=%ymm8
vpsrlw $8,%ymm9,%ymm8

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10
# asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9
vpand %ymm12,%ymm5,%ymm9

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4
# asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3
vpor %ymm3,%ymm7,%ymm3

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8
# asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7
vpor %ymm8,%ymm9,%ymm7

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8
vpand %ymm11,%ymm4,%ymm8

# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#1,>v10=reg256#10
# asm 2: vpsllw $8,<x3=%ymm0,>v10=%ymm9
vpsllw $8,%ymm0,%ymm9

# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8,%ymm11,%ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0
vpand %ymm0,%ymm5,%ymm0

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8
vpor %ymm8,%ymm9,%ymm8

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0
vpor %ymm11,%ymm0,%ymm0

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9
vpand %ymm10,%ymm4,%ymm9

# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#3,>v10=reg256#12
# asm 2: vpsllw $8,<x5=%ymm2,>v10=%ymm11
vpsllw $8,%ymm2,%ymm11

# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#11,>v01=reg256#11
# asm 2: vpsrlw $8,<x4=%ymm10,>v01=%ymm10
vpsrlw $8,%ymm10,%ymm10

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3
# asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2
vpand %ymm2,%ymm5,%ymm2

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9
vpor %ymm9,%ymm11,%ymm9

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3
# asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2
vpor %ymm10,%ymm2,%ymm2

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#5
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm4
vpand %ymm6,%ymm4,%ymm4

# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#2,>v10=reg256#11
# asm
2: vpsllw $8,<x7=%ymm1,>v10=%ymm10 vpsllw $8, % ymm1, % ymm10 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1 vpand % ymm1, % ymm5, % ymm1 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#5,<v10=reg256#11,>x6=reg256#5 # asm 2: vpor <v00=%ymm4,<v10=%ymm10,>x6=%ymm4 vpor % ymm4, % ymm10, % ymm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1 vpor % ymm6, % ymm1, % ymm1 # qhasm: mem256[ input_0 + 224 ] = x0 # asm 1: vmovupd <x0=reg256#4,224(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm3,224(<input_0=%rdi) vmovupd % ymm3, 224( % rdi) # qhasm: mem256[ input_0 + 480 ] = x1 # asm 1: vmovupd <x1=reg256#8,480(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm7,480(<input_0=%rdi) vmovupd % ymm7, 480( % rdi) # qhasm: mem256[ input_0 + 736 ] = x2 # asm 1: vmovupd <x2=reg256#9,736(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm8,736(<input_0=%rdi) vmovupd % ymm8, 736( % rdi) # qhasm: mem256[ input_0 + 992 ] = x3 # asm 1: vmovupd <x3=reg256#1,992(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm0,992(<input_0=%rdi) vmovupd % ymm0, 992( % rdi) # qhasm: mem256[ input_0 + 1248 ] = x4 # asm 1: vmovupd <x4=reg256#10,1248(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm9,1248(<input_0=%rdi) vmovupd % ymm9, 1248( % rdi) # qhasm: mem256[ input_0 + 1504 ] = x5 # asm 1: vmovupd <x5=reg256#3,1504(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm2,1504(<input_0=%rdi) vmovupd % ymm2, 1504( % rdi) # qhasm: mem256[ input_0 + 1760 ] = x6 # asm 1: vmovupd <x6=reg256#5,1760(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm4,1760(<input_0=%rdi) vmovupd % ymm4, 1760( % rdi) # qhasm: mem256[ input_0 + 2016 ] = x7 # asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi) vmovupd % ymm1, 2016( % rdi) # qhasm: mask0 aligned= mem256[ MASK2_0 ] # asm 1: vmovapd MASK2_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK2_0(%rip),>mask0=%ymm0 vmovapd MASK2_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK2_1 ] # asm 1: vmovapd MASK2_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK2_1(%rip),>mask1=%ymm1 vmovapd MASK2_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK1_0 ] # asm 1: vmovapd MASK1_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK1_0(%rip),>mask2=%ymm2 vmovapd MASK1_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK1_1 ] # asm 1: vmovapd MASK1_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK1_1(%rip),>mask3=%ymm3 vmovapd MASK1_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK0_0 ] # asm 1: vmovapd MASK0_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK0_0(%rip),>mask4=%ymm4 vmovapd MASK0_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK0_1 ] # asm 1: vmovapd MASK0_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK0_1(%rip),>mask5=%ymm5 vmovapd MASK0_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 32(<input_0=%rdi),>x1=%ymm7 vmovupd 32( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 64(<input_0=%rdi),>x2=%ymm8 vmovupd 64( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 96 ] # asm 1: vmovupd 
96(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 96(<input_0=%rdi),>x3=%ymm9 vmovupd 96( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 128(<input_0=%rdi),>x4=%ymm10 vmovupd 128( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 160 ] # asm 1: vmovupd 160(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 160(<input_0=%rdi),>x5=%ymm11 vmovupd 160( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 192(<input_0=%rdi),>x6=%ymm12 vmovupd 192( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 224(<input_0=%rdi),>x7=%ymm13 vmovupd 224( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand 
<x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: 
vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 
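# Reader's note: each block in this routine is one masked-swap step of a
# bit-matrix transpose: vpand with a complementary MASKn_0/MASKn_1 pair
# selects two interleaved bit groups, a shift (vpsllq/vpsrlq here, or the
# vpslld/vpsllw variants above) moves one group onto the other's bit
# positions, and vpor recombines them.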
# qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 
1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 32 ] = x1 # asm 1: vmovupd <x1=reg256#14,32(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,32(<input_0=%rdi) vmovupd % ymm13, 32( % rdi) # qhasm: mem256[ input_0 + 64 ] = x2 # asm 1: vmovupd <x2=reg256#15,64(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,64(<input_0=%rdi) vmovupd % ymm14, 64( % rdi) # qhasm: mem256[ input_0 + 96 ] = x3 # asm 1: vmovupd <x3=reg256#11,96(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,96(<input_0=%rdi) vmovupd % ymm10, 96( % rdi) # qhasm: mem256[ input_0 + 128 ] = x4 # asm 1: vmovupd <x4=reg256#12,128(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,128(<input_0=%rdi) vmovupd % ymm11, 128( % rdi) # qhasm: mem256[ input_0 + 160 ] = x5 # asm 1: vmovupd <x5=reg256#9,160(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,160(<input_0=%rdi) vmovupd % ymm8, 160( % rdi) # qhasm: mem256[ input_0 + 192 ] = x6 # asm 1: vmovupd <x6=reg256#13,192(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,192(<input_0=%rdi) vmovupd % ymm12, 192( % rdi) # qhasm: mem256[ input_0 + 224 ] = x7 # asm 1: vmovupd <x7=reg256#7,224(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,224(<input_0=%rdi) vmovupd % ymm6, 224( % rdi) # qhasm: x0 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 256(<input_0=%rdi),>x0=%ymm6 vmovupd 256( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 320 ] # asm 1: vmovupd 
320(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 320(<input_0=%rdi),>x2=%ymm8 vmovupd 320( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 352(<input_0=%rdi),>x3=%ymm9 vmovupd 352( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 384(<input_0=%rdi),>x4=%ymm10 vmovupd 384( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 416 ] # asm 1: vmovupd 416(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 416(<input_0=%rdi),>x5=%ymm11 vmovupd 416( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 448 ] # asm 1: vmovupd 448(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 448(<input_0=%rdi),>x6=%ymm12 vmovupd 448( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 480 ] # asm 1: vmovupd 480(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 480(<input_0=%rdi),>x7=%ymm13 vmovupd 480( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand 
<x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: 
vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % 
ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: 
vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 256 ] = x0 # asm 1: vmovupd <x0=reg256#10,256(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,256(<input_0=%rdi) vmovupd % ymm9, 256( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 320 ] = x2 # asm 1: vmovupd <x2=reg256#15,320(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,320(<input_0=%rdi) vmovupd % ymm14, 320( % rdi) # qhasm: mem256[ input_0 + 352 ] = x3 # asm 1: vmovupd <x3=reg256#11,352(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,352(<input_0=%rdi) vmovupd % ymm10, 352( % rdi) # qhasm: mem256[ input_0 + 384 ] = x4 # asm 1: vmovupd <x4=reg256#12,384(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,384(<input_0=%rdi) vmovupd % ymm11, 384( % rdi) # qhasm: mem256[ input_0 + 416 ] = x5 # asm 1: vmovupd <x5=reg256#9,416(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,416(<input_0=%rdi) vmovupd % ymm8, 416( % rdi) # qhasm: mem256[ input_0 + 448 ] = x6 # asm 1: vmovupd <x6=reg256#13,448(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,448(<input_0=%rdi) vmovupd % ymm12, 448( % rdi) # qhasm: mem256[ input_0 + 480 ] = x7 # asm 1: vmovupd <x7=reg256#7,480(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,480(<input_0=%rdi) vmovupd % ymm6, 480( % rdi) # qhasm: x0 = mem256[ input_0 + 512 ] # asm 1: vmovupd 512(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 512(<input_0=%rdi),>x0=%ymm6 vmovupd 512( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 544 ] # asm 
1: vmovupd 544(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 544(<input_0=%rdi),>x1=%ymm7 vmovupd 544( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 608(<input_0=%rdi),>x3=%ymm9 vmovupd 608( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 640 ] # asm 1: vmovupd 640(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 640(<input_0=%rdi),>x4=%ymm10 vmovupd 640( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 672 ] # asm 1: vmovupd 672(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 672(<input_0=%rdi),>x5=%ymm11 vmovupd 672( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 704 ] # asm 1: vmovupd 704(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 704(<input_0=%rdi),>x6=%ymm12 vmovupd 704( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 736 ] # asm 1: vmovupd 736(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 736(<input_0=%rdi),>x7=%ymm13 vmovupd 736( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor 
<v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # 
asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % 
ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 
# qhasm: v01 = x4 & mask5
vpand %ymm12,%ymm5,%ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm12,%ymm12
# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12
# qhasm: v10 = x7 & mask4
vpand %ymm7,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x6 & mask5
vpand %ymm6,%ymm5,%ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm6,%ymm6
# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6
# qhasm: mem256[ input_0 + 512 ] = x0
vmovupd %ymm9,512(%rdi)
# qhasm: mem256[ input_0 + 544 ] = x1
vmovupd %ymm13,544(%rdi)
# qhasm: mem256[ input_0 + 576 ] = x2
vmovupd %ymm14,576(%rdi)
# qhasm: mem256[ input_0 + 608 ] = x3
vmovupd %ymm10,608(%rdi)
# qhasm: mem256[ input_0 + 640 ] = x4
vmovupd %ymm11,640(%rdi)
# qhasm: mem256[ input_0 + 672 ] = x5
vmovupd %ymm8,672(%rdi)
# qhasm: mem256[ input_0 + 704 ] = x6
vmovupd %ymm12,704(%rdi)
# qhasm: mem256[ input_0 + 736 ] = x7
vmovupd %ymm6,736(%rdi)
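
# ---------------------------------------------------------------------
# Note: the block just stored (and every 256-byte block that follows)
# is run through three masked-shift interleave levels within each
# 64-bit lane: distance 4 (mask0/mask1), distance 2 (mask2/mask3),
# then distance 1 (mask4/mask5).  A hedged C sketch of one level,
# assuming uint64_t lanes; the names below are illustrative only and
# not part of the generated code:
#
#     /* m selects the "low" bits; ~m is kept in a second register */
#     void interleave_level(uint64_t *x, uint64_t *y,
#                           uint64_t m, int s) {
#         uint64_t v00 = *x & m;          /* bits that stay in x */
#         uint64_t v10 = (*y & m) << s;   /* bits moving y -> x  */
#         uint64_t v01 = (*x & ~m) >> s;  /* bits moving x -> y  */
#         uint64_t v11 = *y & ~m;         /* bits that stay in y */
#         *x = v00 | v10;
#         *y = v01 | v11;
#     }
#
# Each vpand/vpsllq/vpsrlq/vpor group below follows exactly this
# and/shift/or form, one pair of ymm words at a time.
# ---------------------------------------------------------------------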
# qhasm: x0 = mem256[ input_0 + 768 ]
vmovupd 768(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 800 ]
vmovupd 800(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 832 ]
vmovupd 832(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 864 ]
vmovupd 864(%rdi),%ymm9
# qhasm: x4 = mem256[ input_0 + 896 ]
vmovupd 896(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 928 ]
vmovupd 928(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 960 ]
vmovupd 960(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 992 ]
vmovupd 992(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14
# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6
# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10
# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7
# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8
# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12
# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9
# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13
# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14
# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14
# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10
# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12
# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6
# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8
# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7
# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9
# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14
# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm11,%ymm11
# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11
# qhasm: v10 = x5 & mask4
vpand %ymm8,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x4 & mask5
vpand %ymm12,%ymm5,%ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm12,%ymm12
# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12
# qhasm: v10 = x7 & mask4
vpand %ymm7,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x6 & mask5
vpand %ymm6,%ymm5,%ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm6,%ymm6
# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6
# qhasm: mem256[ input_0 + 768 ] = x0
vmovupd %ymm9,768(%rdi)
# qhasm: mem256[ input_0 + 800 ] = x1
vmovupd %ymm13,800(%rdi)
# qhasm: mem256[ input_0 + 832 ] = x2
vmovupd %ymm14,832(%rdi)
# qhasm: mem256[ input_0 + 864 ] = x3
vmovupd %ymm10,864(%rdi)
# qhasm: mem256[ input_0 + 896 ] = x4
vmovupd %ymm11,896(%rdi)
# qhasm: mem256[ input_0 + 928 ] = x5
vmovupd %ymm8,928(%rdi)
# qhasm: mem256[ input_0 + 960 ] = x6
vmovupd %ymm12,960(%rdi)
# qhasm: mem256[ input_0 + 992 ] = x7
vmovupd %ymm6,992(%rdi)
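
# Note: the pass is fully unrolled; the same three-level interleave now
# repeats for the next 256-byte block, only the offsets change.  A
# hedged sketch of the per-block structure (illustrative C, not part
# of the generated code):
#
#     for (off = 0; off < len; off += 256) {   /* one block */
#         /* load x0..x7 from buf+off .. buf+off+224 (32 bytes apart) */
#         /* interleave at distance 4, then 2, then 1                 */
#         /* store x0..x7 back to the same offsets                    */
#     }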
# qhasm: x0 = mem256[ input_0 + 1024 ]
vmovupd 1024(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 1056 ]
vmovupd 1056(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 1088 ]
vmovupd 1088(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 1120 ]
vmovupd 1120(%rdi),%ymm9
# qhasm: x4 = mem256[ input_0 + 1152 ]
vmovupd 1152(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 1184 ]
vmovupd 1184(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 1216 ]
vmovupd 1216(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 1248 ]
vmovupd 1248(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14
# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6
# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10
# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7
# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8
# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12
# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9
# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13
# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14
# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14
# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10
# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12
# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6
# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8
# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7
# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9
# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14
# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm11,%ymm11
# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11
# qhasm: v10 = x5 & mask4
vpand %ymm8,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x4 & mask5
vpand %ymm12,%ymm5,%ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm12,%ymm12
# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12
# qhasm: v10 = x7 & mask4
vpand %ymm7,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x6 & mask5
vpand %ymm6,%ymm5,%ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm6,%ymm6
# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6
# qhasm: mem256[ input_0 + 1024 ] = x0
vmovupd %ymm9,1024(%rdi)
# qhasm: mem256[ input_0 + 1056 ] = x1
vmovupd %ymm13,1056(%rdi)
# qhasm: mem256[ input_0 + 1088 ] = x2
vmovupd %ymm14,1088(%rdi)
# qhasm: mem256[ input_0 + 1120 ] = x3
vmovupd %ymm10,1120(%rdi)
# qhasm: mem256[ input_0 + 1152 ] = x4
vmovupd %ymm11,1152(%rdi)
# qhasm: mem256[ input_0 + 1184 ] = x5
vmovupd %ymm8,1184(%rdi)
# qhasm: mem256[ input_0 + 1216 ] = x6
vmovupd %ymm12,1216(%rdi)
# qhasm: mem256[ input_0 + 1248 ] = x7
vmovupd %ymm6,1248(%rdi)
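
# Note on register use in this pass: the six interleave masks stay
# pinned in %ymm0-%ymm5, the working words x0..x7 and the v00/v01/v11
# temporaries cycle through %ymm6-%ymm14, and %ymm15 is reserved as
# the scratch register for the shifted low half (v10).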
# qhasm: x0 = mem256[ input_0 + 1280 ]
vmovupd 1280(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 1312 ]
vmovupd 1312(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 1344 ]
vmovupd 1344(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 1376 ]
vmovupd 1376(%rdi),%ymm9
# qhasm: x4 = mem256[ input_0 + 1408 ]
vmovupd 1408(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 1440 ]
vmovupd 1440(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 1472 ]
vmovupd 1472(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 1504 ]
vmovupd 1504(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14
# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6
# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10
# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7
# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8
# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12
# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9
# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13
# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14
# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14
# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10
# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12
# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6
# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8
# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7
# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9
# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14
# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm11,%ymm11
# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11
# qhasm: v10 = x5 & mask4
vpand %ymm8,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x4 & mask5
vpand %ymm12,%ymm5,%ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm12,%ymm12
# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12
# qhasm: v10 = x7 & mask4
vpand %ymm7,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x6 & mask5
vpand %ymm6,%ymm5,%ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm6,%ymm6
# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6
# qhasm: mem256[ input_0 + 1280 ] = x0
vmovupd %ymm9,1280(%rdi)
# qhasm: mem256[ input_0 + 1312 ] = x1
vmovupd %ymm13,1312(%rdi)
# qhasm: mem256[ input_0 + 1344 ] = x2
vmovupd %ymm14,1344(%rdi)
# qhasm: mem256[ input_0 + 1376 ] = x3
vmovupd %ymm10,1376(%rdi)
# qhasm: mem256[ input_0 + 1408 ] = x4
vmovupd %ymm11,1408(%rdi)
# qhasm: mem256[ input_0 + 1440 ] = x5
vmovupd %ymm8,1440(%rdi)
# qhasm: mem256[ input_0 + 1472 ] = x6
vmovupd %ymm12,1472(%rdi)
# qhasm: mem256[ input_0 + 1504 ] = x7
vmovupd %ymm6,1504(%rdi)
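
# Note: loads and stores use vmovupd throughout, presumably so the
# caller's buffer does not have to be 32-byte aligned.  The loads
# below continue the same pass with the block at offsets 1536-1760.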
% ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand 
<x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor 
<v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: 
vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1536 ] = x0 # asm 1: vmovupd <x0=reg256#10,1536(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1536(<input_0=%rdi) vmovupd % ymm9, 1536( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x1 # asm 1: vmovupd <x1=reg256#14,1568(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1568(<input_0=%rdi) vmovupd % ymm13, 1568( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x2 # asm 1: vmovupd <x2=reg256#15,1600(<input_0=int64#1) # asm 2: vmovupd 
<x2=%ymm14,1600(<input_0=%rdi) vmovupd % ymm14, 1600( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x3 # asm 1: vmovupd <x3=reg256#11,1632(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1632(<input_0=%rdi) vmovupd % ymm10, 1632( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x4 # asm 1: vmovupd <x4=reg256#12,1664(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1664(<input_0=%rdi) vmovupd % ymm11, 1664( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x5 # asm 1: vmovupd <x5=reg256#9,1696(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1696(<input_0=%rdi) vmovupd % ymm8, 1696( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6 # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi) vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1760 ] = x7 # asm 1: vmovupd <x7=reg256#7,1760(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1760(<input_0=%rdi) vmovupd % ymm6, 1760( % rdi) # qhasm: x0 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1792(<input_0=%rdi),>x0=%ymm6 vmovupd 1792( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1824(<input_0=%rdi),>x1=%ymm7 vmovupd 1824( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1856(<input_0=%rdi),>x2=%ymm8 vmovupd 1856( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1888(<input_0=%rdi),>x3=%ymm9 vmovupd 1888( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1920(<input_0=%rdi),>x4=%ymm10 vmovupd 1920( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1952(<input_0=%rdi),>x5=%ymm11 vmovupd 1952( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1984 ] # asm 1: vmovupd 1984(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1984(<input_0=%rdi),>x6=%ymm12 vmovupd 1984( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 2016 ] # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13 vmovupd 2016( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & 
mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#1 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm0 vpand % ymm13, % ymm0, % ymm0 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#1,<v10=reg256#1 # asm 2: vpsllq $4,<v10=%ymm0,<v10=%ymm0 vpsllq $4, % ymm0, % ymm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#1,>x3=reg256#1 # asm 2: vpor 
<v00=%ymm12,<v10=%ymm0,>x3=%ymm0 vpor % ymm12, % ymm0, % ymm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1 vpor % ymm9, % ymm1, % ymm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9 vpand % ymm14, % ymm2, % ymm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#13 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm12 vpand % ymm11, % ymm2, % ymm12 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#13,<v10=reg256#13 # asm 2: vpsllq $2,<v10=%ymm12,<v10=%ymm12 vpsllq $2, % ymm12, % ymm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#14 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm13 vpand % ymm14, % ymm3, % ymm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $2,<v01=%ymm13,<v01=%ymm13 vpsrlq $2, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9 vpor % ymm9, % ymm12, % ymm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11 vpor % ymm13, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12 vpand % ymm10, % ymm2, % ymm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#1,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x3=%ymm0,<mask2=%ymm2,>v10=%ymm13 vpand % ymm0, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12 vpor % ymm12, % ymm13, % ymm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0 vpor % ymm10, % ymm0, % ymm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10 vpand % ymm6, % ymm2, % ymm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm13 vpand % ymm8, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % 
ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10 vpor % ymm10, % ymm13, % ymm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#2,<mask2=reg256#3,>v10=reg256#3 # asm 2: vpand <x7=%ymm1,<mask2=%ymm2,>v10=%ymm2 vpand % ymm1, % ymm2, % ymm2 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#3,<v10=reg256#3 # asm 2: vpsllq $2,<v10=%ymm2,<v10=%ymm2 vpsllq $2, % ymm2, % ymm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#3,>x5=reg256#3 # asm 2: vpor <v00=%ymm8,<v10=%ymm2,>x5=%ymm2 vpor % ymm8, % ymm2, % ymm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1 vpor % ymm7, % ymm1, % ymm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4 # asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3 vpand % ymm9, % ymm4, % ymm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#13,<mask4=reg256#5,>v10=reg256#8 # asm 2: vpand <x1=%ymm12,<mask4=%ymm4,>v10=%ymm7 vpand % ymm12, % ymm4, % ymm7 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#8,<v10=reg256#8 # asm 2: vpsllq $1,<v10=%ymm7,<v10=%ymm7 vpsllq $1, % ymm7, % ymm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#10,<mask5=reg256#6,>v01=reg256#9 # asm 2: vpand <x0=%ymm9,<mask5=%ymm5,>v01=%ymm8 vpand % ymm9, % ymm5, % ymm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10 # asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9 vpand % ymm12, % ymm5, % ymm9 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $1,<v01=%ymm8,<v01=%ymm8 vpsrlq $1, % ymm8, % ymm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4 # asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3 vpor % ymm3, % ymm7, % ymm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8 # asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7 vpor % ymm8, % ymm9, % ymm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8 vpand % ymm11, % ymm4, % ymm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#1,<mask4=reg256#5,>v10=reg256#10 # asm 2: vpand <x3=%ymm0,<mask4=%ymm4,>v10=%ymm9 vpand % ymm0, % ymm4, % ymm9 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#10,<v10=reg256#10 # asm 2: vpsllq $1,<v10=%ymm9,<v10=%ymm9 vpsllq $1, % ymm9, % ymm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 
vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0 vpand % ymm0, % ymm5, % ymm0 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8 vpor % ymm8, % ymm9, % ymm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0 vpor % ymm11, % ymm0, % ymm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9 vpand % ymm10, % ymm4, % ymm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#3,<mask4=reg256#5,>v10=reg256#12 # asm 2: vpand <x5=%ymm2,<mask4=%ymm4,>v10=%ymm11 vpand % ymm2, % ymm4, % ymm11 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#12,<v10=reg256#12 # asm 2: vpsllq $1,<v10=%ymm11,<v10=%ymm11 vpsllq $1, % ymm11, % ymm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#11,<mask5=reg256#6,>v01=reg256#11 # asm 2: vpand <x4=%ymm10,<mask5=%ymm5,>v01=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3 # asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2 vpand % ymm2, % ymm5, % ymm2 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $1,<v01=%ymm10,<v01=%ymm10 vpsrlq $1, % ymm10, % ymm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9 vpor % ymm9, % ymm11, % ymm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3 # asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2 vpor % ymm10, % ymm2, % ymm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#11 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm10 vpand % ymm6, % ymm4, % ymm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#2,<mask4=reg256#5,>v10=reg256#5 # asm 2: vpand <x7=%ymm1,<mask4=%ymm4,>v10=%ymm4 vpand % ymm1, % ymm4, % ymm4 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#5,<v10=reg256#5 # asm 2: vpsllq $1,<v10=%ymm4,<v10=%ymm4 vpsllq $1, % ymm4, % ymm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1 vpand % ymm1, % ymm5, % ymm1 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#5,>x6=reg256#5 # asm 2: vpor <v00=%ymm10,<v10=%ymm4,>x6=%ymm4 vpor % ymm10, % ymm4, % ymm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1 vpor % ymm6, % ymm1, % ymm1 # qhasm: mem256[ input_0 + 1792 ] = x0 # asm 1: vmovupd <x0=reg256#4,1792(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm3,1792(<input_0=%rdi) vmovupd % ymm3, 1792( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x1 # asm 1: vmovupd <x1=reg256#8,1824(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm7,1824(<input_0=%rdi) vmovupd % ymm7, 1824( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x2 # asm 1: vmovupd 
<x2=reg256#9,1856(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm8,1856(<input_0=%rdi) vmovupd % ymm8, 1856( % rdi) # qhasm: mem256[ input_0 + 1888 ] = x3 # asm 1: vmovupd <x3=reg256#1,1888(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm0,1888(<input_0=%rdi) vmovupd % ymm0, 1888( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x4 # asm 1: vmovupd <x4=reg256#10,1920(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm9,1920(<input_0=%rdi) vmovupd % ymm9, 1920( % rdi) # qhasm: mem256[ input_0 + 1952 ] = x5 # asm 1: vmovupd <x5=reg256#3,1952(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm2,1952(<input_0=%rdi) vmovupd % ymm2, 1952( % rdi) # qhasm: mem256[ input_0 + 1984 ] = x6 # asm 1: vmovupd <x6=reg256#5,1984(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm4,1984(<input_0=%rdi) vmovupd % ymm4, 1984( % rdi) # qhasm: mem256[ input_0 + 2016 ] = x7 # asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi) vmovupd % ymm1, 2016( % rdi) # qhasm: return add % r11, % rsp ret
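# Note (summary, inferred from the qhasm annotations above): each pass in
# this routine applies the same masked butterfly to pairs of 256-bit words
# (lo, hi) with a shift k of 4, 2, then 1:
#   lo' = (lo & mask_even) | ((hi & mask_even) << k)
#   hi' = ((lo & mask_odd) >> k) | (hi & mask_odd)
# i.e. it exchanges k-bit groups between the two words, which is the
# standard final stage of a bitwise matrix transpose.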
mktmansour/MKT-KSA-Geolocation-Security
76,827
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896/avx2/vec256_maa_asm.S
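# Summary (inferred from the qhasm annotations in this file): input_1 and
# input_2 each point at 13 unsigned 256-bit limbs (a0..a12, b0..b12), a
# bitsliced representation of GF(2^13) elements. The routine forms the
# schoolbook product with vpand (bit multiply) and vpxor (bit add), folding
# the high limbs r13..r24 back down via the field polynomial
# x^13 + x^4 + x^3 + x + 1 (e.g. r24 is XORed into r15, r14, r12, r11).
# The name "maa" suggests multiply-and-accumulate into the buffer at
# input_0; the final accumulation is not shown in this excerpt.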
#include "namespace.h" #define vec256_maa_asm CRYPTO_NAMESPACE(vec256_maa_asm) #define _vec256_maa_asm _CRYPTO_NAMESPACE(vec256_maa_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_maa_asm .p2align 5 .global _vec256_maa_asm .global vec256_maa_asm _vec256_maa_asm: vec256_maa_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: r12 = r12 ^ mem256[ input_0 + 384 ] # asm 1: vpxor 384(<input_0=int64#1),<r12=reg256#3,>r12=reg256#1 # asm 2: vpxor 384(<input_0=%rdi),<r12=%ymm2,>r12=%ymm0 vpxor 384( % rdi), % ymm2, % ymm0 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm0,384(<input_0=%rdi) vmovupd % ymm0, 384( % rdi) # qhasm: r12 = r12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#1,>r12=reg256#1 # asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm0,>r12=%ymm0 vpxor 384( % rsi), % 
ymm0, % ymm0 # qhasm: mem256[ input_1 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2) # asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi) vmovupd % ymm0, 384( % rsi) # qhasm: r11 = r11 ^ mem256[ input_0 + 352 ] # asm 1: vpxor 352(<input_0=int64#1),<r11=reg256#2,>r11=reg256#1 # asm 2: vpxor 352(<input_0=%rdi),<r11=%ymm1,>r11=%ymm0 vpxor 352( % rdi), % ymm1, % ymm0 # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm0,352(<input_0=%rdi) vmovupd % ymm0, 352( % rdi) # qhasm: r11 = r11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#1,>r11=reg256#1 # asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm0,>r11=%ymm0 vpxor 352( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2) # asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi) vmovupd % ymm0, 352( % rsi) # qhasm: r10 = r10 ^ mem256[ input_0 + 320 ] # asm 1: vpxor 320(<input_0=int64#1),<r10=reg256#14,>r10=reg256#1 # asm 2: vpxor 320(<input_0=%rdi),<r10=%ymm13,>r10=%ymm0 vpxor 320( % rdi), % ymm13, % ymm0 # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm0,320(<input_0=%rdi) vmovupd % ymm0, 320( % rdi) # qhasm: r10 = r10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#1,>r10=reg256#1 # asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm0,>r10=%ymm0 vpxor 320( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2) # asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi) vmovupd % ymm0, 320( % rsi) # qhasm: r9 = r9 ^ mem256[ input_0 + 288 ] # asm 1: vpxor 288(<input_0=int64#1),<r9=reg256#13,>r9=reg256#1 # asm 2: vpxor 288(<input_0=%rdi),<r9=%ymm12,>r9=%ymm0 vpxor 288( % rdi), % ymm12, % ymm0 # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm0,288(<input_0=%rdi) vmovupd % ymm0, 288( % rdi) # qhasm: r9 = r9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#1,>r9=reg256#1 # asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm0,>r9=%ymm0 vpxor 288( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2) # asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi) vmovupd % ymm0, 288( % rsi) # qhasm: r8 = r8 ^ mem256[ input_0 + 256 ] # asm 1: vpxor 256(<input_0=int64#1),<r8=reg256#12,>r8=reg256#1 # asm 2: vpxor 256(<input_0=%rdi),<r8=%ymm11,>r8=%ymm0 vpxor 256( % rdi), % ymm11, % ymm0 # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm0,256(<input_0=%rdi) vmovupd % ymm0, 256( % rdi) # qhasm: r8 = r8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#1,>r8=reg256#1 # asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm0,>r8=%ymm0 vpxor 256( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2) # asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi) vmovupd % ymm0, 256( % rsi) # qhasm: r7 = r7 ^ mem256[ input_0 + 224 ] # asm 1: vpxor 224(<input_0=int64#1),<r7=reg256#11,>r7=reg256#1 # asm 2: vpxor 224(<input_0=%rdi),<r7=%ymm10,>r7=%ymm0 vpxor 224( % rdi), % ymm10, % ymm0 # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm0,224(<input_0=%rdi) vmovupd % ymm0, 224( % rdi) # qhasm: r7 = r7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 
224(<input_1=int64#2),<r7=reg256#1,>r7=reg256#1 # asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm0,>r7=%ymm0 vpxor 224( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2) # asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi) vmovupd % ymm0, 224( % rsi) # qhasm: r6 = r6 ^ mem256[ input_0 + 192 ] # asm 1: vpxor 192(<input_0=int64#1),<r6=reg256#10,>r6=reg256#1 # asm 2: vpxor 192(<input_0=%rdi),<r6=%ymm9,>r6=%ymm0 vpxor 192( % rdi), % ymm9, % ymm0 # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm0,192(<input_0=%rdi) vmovupd % ymm0, 192( % rdi) # qhasm: r6 = r6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#1,>r6=reg256#1 # asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm0,>r6=%ymm0 vpxor 192( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2) # asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi) vmovupd % ymm0, 192( % rsi) # qhasm: r5 = r5 ^ mem256[ input_0 + 160 ] # asm 1: vpxor 160(<input_0=int64#1),<r5=reg256#9,>r5=reg256#1 # asm 2: vpxor 160(<input_0=%rdi),<r5=%ymm8,>r5=%ymm0 vpxor 160( % rdi), % ymm8, % ymm0 # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm0,160(<input_0=%rdi) vmovupd % ymm0, 160( % rdi) # qhasm: r5 = r5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#1,>r5=reg256#1 # asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm0,>r5=%ymm0 vpxor 160( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2) # asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi) vmovupd % ymm0, 160( % rsi) # qhasm: r4 = r4 ^ mem256[ input_0 + 128 ] # asm 1: vpxor 128(<input_0=int64#1),<r4=reg256#8,>r4=reg256#1 # asm 2: vpxor 128(<input_0=%rdi),<r4=%ymm7,>r4=%ymm0 vpxor 128( % rdi), % ymm7, % ymm0 # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm0,128(<input_0=%rdi) vmovupd % ymm0, 128( % rdi) # qhasm: r4 = r4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<r4=reg256#1,>r4=reg256#1 # asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm0,>r4=%ymm0 vpxor 128( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2) # asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi) vmovupd % ymm0, 128( % rsi) # qhasm: r3 = r3 ^ mem256[ input_0 + 96 ] # asm 1: vpxor 96(<input_0=int64#1),<r3=reg256#7,>r3=reg256#1 # asm 2: vpxor 96(<input_0=%rdi),<r3=%ymm6,>r3=%ymm0 vpxor 96( % rdi), % ymm6, % ymm0 # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm0,96(<input_0=%rdi) vmovupd % ymm0, 96( % rdi) # qhasm: r3 = r3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#1,>r3=reg256#1 # asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm0,>r3=%ymm0 vpxor 96( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2) # asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi) vmovupd % ymm0, 96( % rsi) # qhasm: r2 = r2 ^ mem256[ input_0 + 64 ] # asm 1: vpxor 64(<input_0=int64#1),<r2=reg256#6,>r2=reg256#1 # asm 2: vpxor 64(<input_0=%rdi),<r2=%ymm5,>r2=%ymm0 vpxor 64( % rdi), % ymm5, % ymm0 # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: r2 = r2 ^ mem256[ input_1 + 64 ] # asm 1: 
vpxor 64(<input_1=int64#2),<r2=reg256#1,>r2=reg256#1 # asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm0,>r2=%ymm0 vpxor 64( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2) # asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi) vmovupd % ymm0, 64( % rsi) # qhasm: r1 = r1 ^ mem256[ input_0 + 32 ] # asm 1: vpxor 32(<input_0=int64#1),<r1=reg256#5,>r1=reg256#1 # asm 2: vpxor 32(<input_0=%rdi),<r1=%ymm4,>r1=%ymm0 vpxor 32( % rdi), % ymm4, % ymm0 # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm0,32(<input_0=%rdi) vmovupd % ymm0, 32( % rdi) # qhasm: r1 = r1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#1,>r1=reg256#1 # asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm0,>r1=%ymm0 vpxor 32( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2) # asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi) vmovupd % ymm0, 32( % rsi) # qhasm: r0 = r0 ^ mem256[ input_0 + 0 ] # asm 1: vpxor 0(<input_0=int64#1),<r0=reg256#4,>r0=reg256#1 # asm 2: vpxor 0(<input_0=%rdi),<r0=%ymm3,>r0=%ymm0 vpxor 0( % rdi), % ymm3, % ymm0 # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm0,0(<input_0=%rdi) vmovupd % ymm0, 0( % rdi) # qhasm: r0 = r0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#1,>r0=reg256#1 # asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm0,>r0=%ymm0 vpxor 0( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2) # asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi) vmovupd % ymm0, 0( % rsi) # qhasm: return add % r11, % rsp ret
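The qhasm-generated routine above is a bitsliced multiply-accumulate over GF(2^13): thirteen 256-bit words each hold one coefficient bit of 256 independent field elements, the vpand/vpxor ladder forms the 25 schoolbook partial products of a carryless multiplication, and the interleaved folds (e.g. r21 into r12, r11, r9 and r8) reduce modulo what appears to be f(x) = x^13 + x^4 + x^3 + x + 1, before the trailing vpxor/vmovupd pairs XOR the 13 result words into both output buffers (the "add-multiply-add" pattern the following file's name refers to). As a minimal scalar sketch of the same structure -- an illustration under those assumptions, not the vendored code -- with 64-bit lanes standing in for the AVX2 registers:

#include <stdint.h>

#define GFBITS 13

/* Bitsliced GF(2^13) multiply-accumulate: a, b and out each hold
 * GFBITS words, word i carrying coefficient bit i of 64 independent
 * field elements. */
static void gf13_mul_acc(uint64_t out[GFBITS],
                         const uint64_t a[GFBITS],
                         const uint64_t b[GFBITS])
{
    uint64_t prod[2 * GFBITS - 1] = {0};
    int i, j;

    /* schoolbook carryless multiplication: AND is the bit product
     * (vpand in the assembly), XOR is the carry-free sum (vpxor) */
    for (i = 0; i < GFBITS; i++)
        for (j = 0; j < GFBITS; j++)
            prod[i + j] ^= a[i] & b[j];

    /* reduce modulo f(x) = x^13 + x^4 + x^3 + x + 1, i.e.
     * x^(13+k) == x^(4+k) + x^(3+k) + x^(1+k) + x^k; iterating
     * downward handles the cascaded folds, which the assembly
     * instead interleaves with the partial-product ladder */
    for (i = 2 * GFBITS - 2; i >= GFBITS; i--) {
        prod[i - GFBITS + 4] ^= prod[i];
        prod[i - GFBITS + 3] ^= prod[i];
        prod[i - GFBITS + 1] ^= prod[i];
        prod[i - GFBITS + 0] ^= prod[i];
    }

    /* XOR-accumulate into the caller's buffer, matching the
     * trailing vpxor/vmovupd store sequence */
    for (i = 0; i < GFBITS; i++)
        out[i] ^= prod[i];
}

The AVX2 version performs exactly these AND/XOR steps 256 bits at a time; gf13_mul_acc and its lane layout are hypothetical names for this sketch, not symbols from the crate.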
mktmansour/MKT-KSA-Geolocation-Security
76,935
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119/avx2/vec256_ama_asm.S
#include "namespace.h" #define vec256_ama_asm CRYPTO_NAMESPACE(vec256_ama_asm) #define _vec256_ama_asm _CRYPTO_NAMESPACE(vec256_ama_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_ama_asm .p2align 5 .global _vec256_ama_asm .global vec256_ama_asm _vec256_ama_asm: vec256_ama_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>a12=reg256#2 # asm 2: vmovupd 384(<input_0=%rdi),>a12=%ymm1 vmovupd 384( % rdi), % ymm1 # qhasm: a12 = a12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<a12=reg256#2,>a12=reg256#2 # asm 2: vpxor 384(<input_1=%rsi),<a12=%ymm1,>a12=%ymm1 vpxor 384( % rsi), % ymm1, % ymm1 # qhasm: mem256[ input_0 + 384 ] = a12 # asm 1: vmovupd <a12=reg256#2,384(<input_0=int64#1) # asm 2: vmovupd <a12=%ymm1,384(<input_0=%rdi) vmovupd % ymm1, 384( % rdi) # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # 
asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 = a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>a11=reg256#15 # asm 2: vmovupd 352(<input_0=%rdi),>a11=%ymm14 vmovupd 352( % rdi), % ymm14 # qhasm: a11 = a11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<a11=reg256#15,>a11=reg256#15 # asm 2: vpxor 352(<input_1=%rsi),<a11=%ymm14,>a11=%ymm14 vpxor 352( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 352 ] = a11 # asm 1: vmovupd <a11=reg256#15,352(<input_0=int64#1) # asm 2: vmovupd <a11=%ymm14,352(<input_0=%rdi) vmovupd % ymm14, 352( % rdi) # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 
# qhasm: a11 = mem256[ input_0 + 352 ]
# asm 1: vmovupd 352(<input_0=int64#1),>a11=reg256#15
# asm 2: vmovupd 352(<input_0=%rdi),>a11=%ymm14
vmovupd 352( % rdi), % ymm14

# qhasm: a11 = a11 ^ mem256[ input_1 + 352 ]
# asm 1: vpxor 352(<input_1=int64#2),<a11=reg256#15,>a11=reg256#15
# asm 2: vpxor 352(<input_1=%rsi),<a11=%ymm14,>a11=%ymm14
vpxor 352( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 352 ] = a11
# asm 1: vmovupd <a11=reg256#15,352(<input_0=int64#1)
# asm 2: vmovupd <a11=%ymm14,352(<input_0=%rdi)
vmovupd % ymm14, 352( % rdi)

# qhasm: r = a11 & b0
# asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1

# qhasm: r = a11 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2

# qhasm: r = a11 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3

# qhasm: r = a11 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4

# qhasm: r = a11 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5

# qhasm: r = a11 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6

# qhasm: r = a11 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7

# qhasm: r = a11 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor % ymm15, % ymm8, % ymm8

# qhasm: r = a11 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor % ymm15, % ymm9, % ymm9

# qhasm: r = a11 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15

# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor % ymm15, % ymm10, % ymm10

# qhasm: r = a11 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15

# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
vpxor % ymm15, % ymm11, % ymm11

# qhasm: r = a11 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15

# qhasm: r22 ^= r
# asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13
# asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12
vpxor % ymm15, % ymm12, % ymm12

# qhasm: r = a11 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14

# qhasm: r23 ^= r
# asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14
# asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13
vpxor % ymm14, % ymm13, % ymm13

# qhasm: r14 ^= r23
# asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4
vpxor % ymm13, % ymm4, % ymm4

# qhasm: r13 ^= r23
# asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3
vpxor % ymm13, % ymm3, % ymm3

# qhasm: r11 ^= r23
# asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1
vpxor % ymm13, % ymm1, % ymm1

# qhasm: r10 = r23
# asm 1: vmovapd <r23=reg256#14,>r10=reg256#14
# asm 2: vmovapd <r23=%ymm13,>r10=%ymm13
vmovapd % ymm13, % ymm13

# qhasm: a10 = mem256[ input_0 + 320 ]
# asm 1: vmovupd 320(<input_0=int64#1),>a10=reg256#15
# asm 2: vmovupd 320(<input_0=%rdi),>a10=%ymm14
vmovupd 320( % rdi), % ymm14

# qhasm: a10 = a10 ^ mem256[ input_1 + 320 ]
# asm 1: vpxor 320(<input_1=int64#2),<a10=reg256#15,>a10=reg256#15
# asm 2: vpxor 320(<input_1=%rsi),<a10=%ymm14,>a10=%ymm14
vpxor 320( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 320 ] = a10
# asm 1: vmovupd <a10=reg256#15,320(<input_0=int64#1)
# asm 2: vmovupd <a10=%ymm14,320(<input_0=%rdi)
vmovupd % ymm14, 320( % rdi)

# qhasm: r = a10 & b0
# asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13

# qhasm: r = a10 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1

# qhasm: r = a10 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2

# qhasm: r = a10 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3

# qhasm: r = a10 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4

# qhasm: r = a10 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5

# qhasm: r = a10 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6

# qhasm: r = a10 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7

# qhasm: r = a10 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor % ymm15, % ymm8, % ymm8

# qhasm: r = a10 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor % ymm15, % ymm9, % ymm9

# qhasm: r = a10 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15

# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor % ymm15, % ymm10, % ymm10

# qhasm: r = a10 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15

# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
vpxor % ymm15, % ymm11, % ymm11

# qhasm: r = a10 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14

# qhasm: r22 ^= r
# asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13
# asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12
vpxor % ymm14, % ymm12, % ymm12

# qhasm: r13 ^= r22
# asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3
vpxor % ymm12, % ymm3, % ymm3

# qhasm: r12 ^= r22
# asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2
vpxor % ymm12, % ymm2, % ymm2

# qhasm: r10 ^= r22
# asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13
vpxor % ymm12, % ymm13, % ymm13

# qhasm: r9 = r22
# asm 1: vmovapd <r22=reg256#13,>r9=reg256#13
# asm 2: vmovapd <r22=%ymm12,>r9=%ymm12
vmovapd % ymm12, % ymm12

# qhasm: a9 = mem256[ input_0 + 288 ]
# asm 1: vmovupd 288(<input_0=int64#1),>a9=reg256#15
# asm 2: vmovupd 288(<input_0=%rdi),>a9=%ymm14
vmovupd 288( % rdi), % ymm14

# qhasm: a9 = a9 ^ mem256[ input_1 + 288 ]
# asm 1: vpxor 288(<input_1=int64#2),<a9=reg256#15,>a9=reg256#15
# asm 2: vpxor 288(<input_1=%rsi),<a9=%ymm14,>a9=%ymm14
vpxor 288( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 288 ] = a9
# asm 1: vmovupd <a9=reg256#15,288(<input_0=int64#1)
# asm 2: vmovupd <a9=%ymm14,288(<input_0=%rdi)
vmovupd % ymm14, 288( % rdi)

# qhasm: r = a9 & b0
# asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12

# qhasm: r = a9 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13

# qhasm: r = a9 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1

# qhasm: r = a9 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2

# qhasm: r = a9 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3

# qhasm: r = a9 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4

# qhasm: r = a9 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5

# qhasm: r = a9 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6

# qhasm: r = a9 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7

# qhasm: r = a9 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor % ymm15, % ymm8, % ymm8

# qhasm: r = a9 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor % ymm15, % ymm9, % ymm9

# qhasm: r = a9 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15

# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor % ymm15, % ymm10, % ymm10

# qhasm: r = a9 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14

# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11
vpxor % ymm14, % ymm11, % ymm11

# qhasm: r12 ^= r21
# asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2
vpxor % ymm11, % ymm2, % ymm2

# qhasm: r11 ^= r21
# asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1
vpxor % ymm11, % ymm1, % ymm1

# qhasm: r9 ^= r21
# asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12
vpxor % ymm11, % ymm12, % ymm12

# qhasm: r8 = r21
# asm 1: vmovapd <r21=reg256#12,>r8=reg256#12
# asm 2: vmovapd <r21=%ymm11,>r8=%ymm11
vmovapd % ymm11, % ymm11

# qhasm: a8 = mem256[ input_0 + 256 ]
# asm 1: vmovupd 256(<input_0=int64#1),>a8=reg256#15
# asm 2: vmovupd 256(<input_0=%rdi),>a8=%ymm14
vmovupd 256( % rdi), % ymm14

# qhasm: a8 = a8 ^ mem256[ input_1 + 256 ]
# asm 1: vpxor 256(<input_1=int64#2),<a8=reg256#15,>a8=reg256#15
# asm 2: vpxor 256(<input_1=%rsi),<a8=%ymm14,>a8=%ymm14
vpxor 256( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 256 ] = a8
# asm 1: vmovupd <a8=reg256#15,256(<input_0=int64#1)
# asm 2: vmovupd <a8=%ymm14,256(<input_0=%rdi)
vmovupd % ymm14, 256( % rdi)

# qhasm: r = a8 & b0
# asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11

# qhasm: r = a8 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12

# qhasm: r = a8 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13

# qhasm: r = a8 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1

# qhasm: r = a8 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2

# qhasm: r = a8 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3

# qhasm: r = a8 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4

# qhasm: r = a8 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5

# qhasm: r = a8 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6

# qhasm: r = a8 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7

# qhasm: r = a8 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor % ymm15, % ymm8, % ymm8

# qhasm: r = a8 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor % ymm15, % ymm9, % ymm9

# qhasm: r = a8 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14

# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10
vpxor % ymm14, % ymm10, % ymm10

# qhasm: r11 ^= r20
# asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1
vpxor % ymm10, % ymm1, % ymm1

# qhasm: r10 ^= r20
# asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13
vpxor % ymm10, % ymm13, % ymm13

# qhasm: r8 ^= r20
# asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11
vpxor % ymm10, % ymm11, % ymm11

# qhasm: r7 = r20
# asm 1: vmovapd <r20=reg256#11,>r7=reg256#11
# asm 2: vmovapd <r20=%ymm10,>r7=%ymm10
vmovapd % ymm10, % ymm10

# qhasm: a7 = mem256[ input_0 + 224 ]
# asm 1: vmovupd 224(<input_0=int64#1),>a7=reg256#15
# asm 2: vmovupd 224(<input_0=%rdi),>a7=%ymm14
vmovupd 224( % rdi), % ymm14

# qhasm: a7 = a7 ^ mem256[ input_1 + 224 ]
# asm 1: vpxor 224(<input_1=int64#2),<a7=reg256#15,>a7=reg256#15
# asm 2: vpxor 224(<input_1=%rsi),<a7=%ymm14,>a7=%ymm14
vpxor 224( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 224 ] = a7
# asm 1: vmovupd <a7=reg256#15,224(<input_0=int64#1)
# asm 2: vmovupd <a7=%ymm14,224(<input_0=%rdi)
vmovupd % ymm14, 224( % rdi)

# qhasm: r = a7 & b0
# asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10

# qhasm: r = a7 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11

# qhasm: r = a7 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12

# qhasm: r = a7 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13

# qhasm: r = a7 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1

# qhasm: r = a7 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2

# qhasm: r = a7 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3

# qhasm: r = a7 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4

# qhasm: r = a7 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5

# qhasm: r = a7 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6

# qhasm: r = a7 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7

# qhasm: r = a7 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor % ymm15, % ymm8, % ymm8

# qhasm: r = a7 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9
vpxor % ymm14, % ymm9, % ymm9

# qhasm: r10 ^= r19
# asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13
vpxor % ymm9, % ymm13, % ymm13

# qhasm: r9 ^= r19
# asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12
vpxor % ymm9, % ymm12, % ymm12

# qhasm: r7 ^= r19
# asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10
vpxor % ymm9, % ymm10, % ymm10

# qhasm: r6 = r19
# asm 1: vmovapd <r19=reg256#10,>r6=reg256#10
# asm 2: vmovapd <r19=%ymm9,>r6=%ymm9
vmovapd % ymm9, % ymm9

# qhasm: a6 = mem256[ input_0 + 192 ]
# asm 1: vmovupd 192(<input_0=int64#1),>a6=reg256#15
# asm 2: vmovupd 192(<input_0=%rdi),>a6=%ymm14
vmovupd 192( % rdi), % ymm14

# qhasm: a6 = a6 ^ mem256[ input_1 + 192 ]
# asm 1: vpxor 192(<input_1=int64#2),<a6=reg256#15,>a6=reg256#15
# asm 2: vpxor 192(<input_1=%rsi),<a6=%ymm14,>a6=%ymm14
vpxor 192( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 192 ] = a6
# asm 1: vmovupd <a6=reg256#15,192(<input_0=int64#1)
# asm 2: vmovupd <a6=%ymm14,192(<input_0=%rdi)
vmovupd % ymm14, 192( % rdi)

# qhasm: r = a6 & b0
# asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9

# qhasm: r = a6 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10

# qhasm: r = a6 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11

# qhasm: r = a6 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12

# qhasm: r = a6 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13

# qhasm: r = a6 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1

# qhasm: r = a6 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2

# qhasm: r = a6 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3

# qhasm: r = a6 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4

# qhasm: r = a6 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5

# qhasm: r = a6 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6

# qhasm: r = a6 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7

# qhasm: r = a6 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8
vpxor % ymm14, % ymm8, % ymm8

# qhasm: r9 ^= r18
# asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12
vpxor % ymm8, % ymm12, % ymm12

# qhasm: r8 ^= r18
# asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11
vpxor % ymm8, % ymm11, % ymm11

# qhasm: r6 ^= r18
# asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9
vpxor % ymm8, % ymm9, % ymm9

# qhasm: r5 = r18
# asm 1: vmovapd <r18=reg256#9,>r5=reg256#9
# asm 2: vmovapd <r18=%ymm8,>r5=%ymm8
vmovapd % ymm8, % ymm8

# qhasm: a5 = mem256[ input_0 + 160 ]
# asm 1: vmovupd 160(<input_0=int64#1),>a5=reg256#15
# asm 2: vmovupd 160(<input_0=%rdi),>a5=%ymm14
vmovupd 160( % rdi), % ymm14

# qhasm: a5 = a5 ^ mem256[ input_1 + 160 ]
# asm 1: vpxor 160(<input_1=int64#2),<a5=reg256#15,>a5=reg256#15
# asm 2: vpxor 160(<input_1=%rsi),<a5=%ymm14,>a5=%ymm14
vpxor 160( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 160 ] = a5
# asm 1: vmovupd <a5=reg256#15,160(<input_0=int64#1)
# asm 2: vmovupd <a5=%ymm14,160(<input_0=%rdi)
vmovupd % ymm14, 160( % rdi)

# qhasm: r = a5 & b0
# asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8

# qhasm: r = a5 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9

# qhasm: r = a5 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10

# qhasm: r = a5 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11

# qhasm: r = a5 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12

# qhasm: r = a5 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13

# qhasm: r = a5 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1

# qhasm: r = a5 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2

# qhasm: r = a5 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3

# qhasm: r = a5 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4

# qhasm: r = a5 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5

# qhasm: r = a5 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6

# qhasm: r = a5 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7
vpxor % ymm14, % ymm7, % ymm7

# qhasm: r8 ^= r17
# asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11
vpxor % ymm7, % ymm11, % ymm11

# qhasm: r7 ^= r17
# asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10
vpxor % ymm7, % ymm10, % ymm10

# qhasm: r5 ^= r17
# asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8
vpxor % ymm7, % ymm8, % ymm8

# qhasm: r4 = r17
# asm 1: vmovapd <r17=reg256#8,>r4=reg256#8
# asm 2: vmovapd <r17=%ymm7,>r4=%ymm7
vmovapd % ymm7, % ymm7

# qhasm: a4 = mem256[ input_0 + 128 ]
# asm 1: vmovupd 128(<input_0=int64#1),>a4=reg256#15
# asm 2: vmovupd 128(<input_0=%rdi),>a4=%ymm14
vmovupd 128( % rdi), % ymm14

# qhasm: a4 = a4 ^ mem256[ input_1 + 128 ]
# asm 1: vpxor 128(<input_1=int64#2),<a4=reg256#15,>a4=reg256#15
# asm 2: vpxor 128(<input_1=%rsi),<a4=%ymm14,>a4=%ymm14
vpxor 128( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 128 ] = a4
# asm 1: vmovupd <a4=reg256#15,128(<input_0=int64#1)
# asm 2: vmovupd <a4=%ymm14,128(<input_0=%rdi)
vmovupd % ymm14, 128( % rdi)

# qhasm: r = a4 & b0
# asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor % ymm15, % ymm7, % ymm7

# qhasm: r = a4 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8

# qhasm: r = a4 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9

# qhasm: r = a4 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10

# qhasm: r = a4 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11

# qhasm: r = a4 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12

# qhasm: r = a4 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13

# qhasm: r = a4 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1

# qhasm: r = a4 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2

# qhasm: r = a4 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3

# qhasm: r = a4 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4

# qhasm: r = a4 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5

# qhasm: r = a4 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6
vpxor % ymm14, % ymm6, % ymm6

# qhasm: r7 ^= r16
# asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10
vpxor % ymm6, % ymm10, % ymm10

# qhasm: r6 ^= r16
# asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9
vpxor % ymm6, % ymm9, % ymm9

# qhasm: r4 ^= r16
# asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7
vpxor % ymm6, % ymm7, % ymm7

# qhasm: r3 = r16
# asm 1: vmovapd <r16=reg256#7,>r3=reg256#7
# asm 2: vmovapd <r16=%ymm6,>r3=%ymm6
vmovapd % ymm6, % ymm6

# qhasm: a3 = mem256[ input_0 + 96 ]
# asm 1: vmovupd 96(<input_0=int64#1),>a3=reg256#15
# asm 2: vmovupd 96(<input_0=%rdi),>a3=%ymm14
vmovupd 96( % rdi), % ymm14

# qhasm: a3 = a3 ^ mem256[ input_1 + 96 ]
# asm 1: vpxor 96(<input_1=int64#2),<a3=reg256#15,>a3=reg256#15
# asm 2: vpxor 96(<input_1=%rsi),<a3=%ymm14,>a3=%ymm14
vpxor 96( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 96 ] = a3
# asm 1: vmovupd <a3=reg256#15,96(<input_0=int64#1)
# asm 2: vmovupd <a3=%ymm14,96(<input_0=%rdi)
vmovupd % ymm14, 96( % rdi)

# qhasm: r = a3 & b0
# asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor % ymm15, % ymm6, % ymm6

# qhasm: r = a3 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor % ymm15, % ymm7, % ymm7

# qhasm: r = a3 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8

# qhasm: r = a3 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9

# qhasm: r = a3 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10

# qhasm: r = a3 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11

# qhasm: r = a3 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12

# qhasm: r = a3 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13

# qhasm: r = a3 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1

# qhasm: r = a3 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2

# qhasm: r = a3 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3

# qhasm: r = a3 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4

# qhasm: r = a3 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5
vpxor % ymm14, % ymm5, % ymm5

# qhasm: r6 ^= r15
# asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9
vpxor % ymm5, % ymm9, % ymm9

# qhasm: r5 ^= r15
# asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8
vpxor % ymm5, % ymm8, % ymm8

# qhasm: r3 ^= r15
# asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6
vpxor % ymm5, % ymm6, % ymm6

# qhasm: r2 = r15
# asm 1: vmovapd <r15=reg256#6,>r2=reg256#6
# asm 2: vmovapd <r15=%ymm5,>r2=%ymm5
vmovapd % ymm5, % ymm5

# qhasm: a2 = mem256[ input_0 + 64 ]
# asm 1: vmovupd 64(<input_0=int64#1),>a2=reg256#15
# asm 2: vmovupd 64(<input_0=%rdi),>a2=%ymm14
vmovupd 64( % rdi), % ymm14

# qhasm: a2 = a2 ^ mem256[ input_1 + 64 ]
# asm 1: vpxor 64(<input_1=int64#2),<a2=reg256#15,>a2=reg256#15
# asm 2: vpxor 64(<input_1=%rsi),<a2=%ymm14,>a2=%ymm14
vpxor 64( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 64 ] = a2
# asm 1: vmovupd <a2=reg256#15,64(<input_0=int64#1)
# asm 2: vmovupd <a2=%ymm14,64(<input_0=%rdi)
vmovupd % ymm14, 64( % rdi)

# qhasm: r = a2 & b0
# asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
vpxor % ymm15, % ymm5, % ymm5

# qhasm: r = a2 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor % ymm15, % ymm6, % ymm6

# qhasm: r = a2 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor % ymm15, % ymm7, % ymm7

# qhasm: r = a2 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8

# qhasm: r = a2 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9

# qhasm: r = a2 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10

# qhasm: r = a2 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11

# qhasm: r = a2 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12

# qhasm: r = a2 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13

# qhasm: r = a2 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1

# qhasm: r = a2 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2

# qhasm: r = a2 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3

# qhasm: r = a2 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4
vpxor % ymm14, % ymm4, % ymm4

# qhasm: r5 ^= r14
# asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8
vpxor % ymm4, % ymm8, % ymm8

# qhasm: r4 ^= r14
# asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7
vpxor % ymm4, % ymm7, % ymm7

# qhasm: r2 ^= r14
# asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5
vpxor % ymm4, % ymm5, % ymm5

# qhasm: r1 = r14
# asm 1: vmovapd <r14=reg256#5,>r1=reg256#5
# asm 2: vmovapd <r14=%ymm4,>r1=%ymm4
vmovapd % ymm4, % ymm4

# qhasm: a1 = mem256[ input_0 + 32 ]
# asm 1: vmovupd 32(<input_0=int64#1),>a1=reg256#15
# asm 2: vmovupd 32(<input_0=%rdi),>a1=%ymm14
vmovupd 32( % rdi), % ymm14

# qhasm: a1 = a1 ^ mem256[ input_1 + 32 ]
# asm 1: vpxor 32(<input_1=int64#2),<a1=reg256#15,>a1=reg256#15
# asm 2: vpxor 32(<input_1=%rsi),<a1=%ymm14,>a1=%ymm14
vpxor 32( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 32 ] = a1
# asm 1: vmovupd <a1=reg256#15,32(<input_0=int64#1)
# asm 2: vmovupd <a1=%ymm14,32(<input_0=%rdi)
vmovupd % ymm14, 32( % rdi)

# qhasm: r = a1 & b0
# asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15

# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4
vpxor % ymm15, % ymm4, % ymm4

# qhasm: r = a1 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
vpxor % ymm15, % ymm5, % ymm5

# qhasm: r = a1 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor % ymm15, % ymm6, % ymm6

# qhasm: r = a1 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor % ymm15, % ymm7, % ymm7

# qhasm: r = a1 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8

# qhasm: r = a1 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9

# qhasm: r = a1 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10

# qhasm: r = a1 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11

# qhasm: r = a1 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12

# qhasm: r = a1 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13

# qhasm: r = a1 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1

# qhasm: r = a1 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2

# qhasm: r = a1 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3
vpxor % ymm14, % ymm3, % ymm3

# qhasm: r4 ^= r13
# asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7
vpxor % ymm3, % ymm7, % ymm7

# qhasm: r3 ^= r13
# asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6
vpxor % ymm3, % ymm6, % ymm6

# qhasm: r1 ^= r13
# asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4
vpxor % ymm3, % ymm4, % ymm4

# qhasm: r0 = r13
# asm 1: vmovapd <r13=reg256#4,>r0=reg256#4
# asm 2: vmovapd <r13=%ymm3,>r0=%ymm3
vmovapd % ymm3, % ymm3

# qhasm: a0 = mem256[ input_0 + 0 ]
# asm 1: vmovupd 0(<input_0=int64#1),>a0=reg256#15
# asm 2: vmovupd 0(<input_0=%rdi),>a0=%ymm14
vmovupd 0( % rdi), % ymm14

# qhasm: a0 = a0 ^ mem256[ input_1 + 0 ]
# asm 1: vpxor 0(<input_1=int64#2),<a0=reg256#15,>a0=reg256#15
# asm 2: vpxor 0(<input_1=%rsi),<a0=%ymm14,>a0=%ymm14
vpxor 0( % rsi), % ymm14, % ymm14

# qhasm: mem256[ input_0 + 0 ] = a0
# asm 1: vmovupd <a0=reg256#15,0(<input_0=int64#1)
# asm 2: vmovupd <a0=%ymm14,0(<input_0=%rdi)
vmovupd % ymm14, 0( % rdi)

# qhasm: r = a0 & b0
# asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1
# asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0
vpand % ymm14, % ymm0, % ymm0

# qhasm: r0 ^= r
# asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4
# asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3
vpxor % ymm0, % ymm3, % ymm3

# qhasm: r = a0 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 32( % rdx), % ymm14, % ymm0

# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4
vpxor % ymm0, % ymm4, % ymm4

# qhasm: r = a0 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 64( % rdx), % ymm14, % ymm0

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5
vpxor % ymm0, % ymm5, % ymm5

# qhasm: r = a0 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 96( % rdx), % ymm14, % ymm0

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6
vpxor % ymm0, % ymm6, % ymm6

# qhasm: r = a0 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 128( % rdx), % ymm14, % ymm0

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7
vpxor % ymm0, % ymm7, % ymm7

# qhasm: r = a0 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 160( % rdx), % ymm14, % ymm0

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8
vpxor % ymm0, % ymm8, % ymm8

# qhasm: r = a0 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 192( % rdx), % ymm14, % ymm0

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9
vpxor % ymm0, % ymm9, % ymm9

# qhasm: r = a0 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 224( % rdx), % ymm14, % ymm0

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10
vpxor % ymm0, % ymm10, % ymm10

# qhasm: r = a0 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 256( % rdx), % ymm14, % ymm0

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11
vpxor % ymm0, % ymm11, % ymm11

# qhasm: r = a0 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 288( % rdx), % ymm14, % ymm0

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12
vpxor % ymm0, % ymm12, % ymm12

# qhasm: r = a0 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 320( % rdx), % ymm14, % ymm0

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13
vpxor % ymm0, % ymm13, % ymm13

# qhasm: r = a0 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 352( % rdx), % ymm14, % ymm0

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1
vpxor % ymm0, % ymm1, % ymm1

# qhasm: r = a0 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 384( % rdx), % ymm14, % ymm0

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2
vpxor % ymm0, % ymm2, % ymm2
vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: r12 = r12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#3,>r12=reg256#1 # asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm2,>r12=%ymm0 vpxor 384( % rsi), % ymm2, % ymm0 # qhasm: mem256[ input_1 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2) # asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi) vmovupd % ymm0, 384( % rsi) # qhasm: r11 = r11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#2,>r11=reg256#1 # asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm1,>r11=%ymm0 vpxor 352( % rsi), % ymm1, % ymm0 # qhasm: mem256[ input_1 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2) # asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi) vmovupd % ymm0, 352( % rsi) # qhasm: r10 = r10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#14,>r10=reg256#1 # asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm13,>r10=%ymm0 vpxor 320( % rsi), % ymm13, % ymm0 # qhasm: mem256[ input_1 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2) # asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi) vmovupd % ymm0, 320( % rsi) # qhasm: r9 = r9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#13,>r9=reg256#1 # asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm12,>r9=%ymm0 vpxor 288( % rsi), % ymm12, % ymm0 # qhasm: mem256[ input_1 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2) # asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi) vmovupd % ymm0, 288( % rsi) # qhasm: r8 = r8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#12,>r8=reg256#1 # asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm11,>r8=%ymm0 vpxor 256( % rsi), % ymm11, % ymm0 # qhasm: mem256[ input_1 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2) # asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi) vmovupd % ymm0, 256( % rsi) # qhasm: r7 = r7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 224(<input_1=int64#2),<r7=reg256#11,>r7=reg256#1 # asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm10,>r7=%ymm0 vpxor 224( % rsi), % ymm10, % ymm0 # qhasm: mem256[ input_1 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2) # asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi) vmovupd % ymm0, 224( % rsi) # qhasm: r6 = r6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#10,>r6=reg256#1 # asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm9,>r6=%ymm0 vpxor 192( % rsi), % ymm9, % ymm0 # qhasm: mem256[ input_1 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2) # asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi) vmovupd % ymm0, 192( % rsi) # qhasm: r5 = r5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#9,>r5=reg256#1 # asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm8,>r5=%ymm0 vpxor 160( % rsi), % ymm8, % ymm0 # qhasm: mem256[ input_1 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2) # asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi) vmovupd % ymm0, 160( % rsi) # qhasm: r4 = r4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 
128(<input_1=int64#2),<r4=reg256#8,>r4=reg256#1 # asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm7,>r4=%ymm0 vpxor 128( % rsi), % ymm7, % ymm0 # qhasm: mem256[ input_1 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2) # asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi) vmovupd % ymm0, 128( % rsi) # qhasm: r3 = r3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#7,>r3=reg256#1 # asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm6,>r3=%ymm0 vpxor 96( % rsi), % ymm6, % ymm0 # qhasm: mem256[ input_1 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2) # asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi) vmovupd % ymm0, 96( % rsi) # qhasm: r2 = r2 ^ mem256[ input_1 + 64 ] # asm 1: vpxor 64(<input_1=int64#2),<r2=reg256#6,>r2=reg256#1 # asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm5,>r2=%ymm0 vpxor 64( % rsi), % ymm5, % ymm0 # qhasm: mem256[ input_1 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2) # asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi) vmovupd % ymm0, 64( % rsi) # qhasm: r1 = r1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#5,>r1=reg256#1 # asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm4,>r1=%ymm0 vpxor 32( % rsi), % ymm4, % ymm0 # qhasm: mem256[ input_1 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2) # asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi) vmovupd % ymm0, 32( % rsi) # qhasm: r0 = r0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#4,>r0=reg256#1 # asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm3,>r0=%ymm0 vpxor 0( % rsi), % ymm3, % ymm0 # qhasm: mem256[ input_1 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2) # asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi) vmovupd % ymm0, 0( % rsi) # qhasm: return add % r11, % rsp ret
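The assembly above is the tail of a qhasm-generated bitsliced multiply-accumulate: it ANDs each coefficient limb of one operand against thirteen 32-byte limbs of the other (offsets 0..384 from input_2), XORs the partial products into r0..r13, folds r13 back into r4, r3, r1 and r0, and finally XOR-accumulates r0..r12 into the buffer at input_1. Below is a minimal portable C sketch of that pattern, not the shipped routine: the field polynomial x^13 + x^4 + x^3 + x + 1 is an assumption inferred from the folding, 64-bit limbs stand in for the 256-bit AVX2 registers, the in-place XOR update of the a operand seen above is omitted, and the function and parameter names are illustrative.

/*
 * Sketch: bitsliced GF(2^13) multiply-accumulate, acc ^= a * b.
 * Each limb holds one polynomial coefficient for 64 independent
 * field elements in parallel (the AVX2 code above does 256 at once).
 */
#include <stdint.h>

#define GFBITS 13  /* assumption: GF(2^13), f(x) = x^13 + x^4 + x^3 + x + 1 */

static void vec_mul_acc(uint64_t acc[GFBITS],
                        const uint64_t a[GFBITS],
                        const uint64_t b[GFBITS])
{
    uint64_t prod[2 * GFBITS - 1] = {0};

    /* schoolbook carry-less multiply: vpand selects, vpxor accumulates */
    for (int i = 0; i < GFBITS; i++)
        for (int j = 0; j < GFBITS; j++)
            prod[i + j] ^= a[i] & b[j];

    /* fold coefficients 13..24 back using x^13 = x^4 + x^3 + x + 1;
     * the assembly interleaves this with the multiply (r13 -> r4,r3,r1,r0),
     * which is equivalent to doing it afterwards as done here */
    for (int i = 2 * GFBITS - 2; i >= GFBITS; i--) {
        prod[i - GFBITS + 4] ^= prod[i];
        prod[i - GFBITS + 3] ^= prod[i];
        prod[i - GFBITS + 1] ^= prod[i];
        prod[i - GFBITS + 0] ^= prod[i];
    }

    /* the final vpxor/vmovupd pass above: XOR the result into input_1 */
    for (int i = 0; i < GFBITS; i++)
        acc[i] ^= prod[i];
}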
mktmansour/MKT-KSA-Geolocation-Security
262,634
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119/avx2/transpose_64x64_asm.S
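Before the verbatim source, a portable C sketch of the 64x64 bit-matrix transpose that the assembly below unrolls: the classic mask-and-shift butterfly, in which bit j of word i moves to bit i of word j. This is a sketch under the assumption that the MASKd_0 / MASKd_1 constants the file loads are the standard interleaving masks for this algorithm; it is not the shipped implementation.

#include <stdint.h>

/* In-place transpose of a 64x64 bit matrix stored as 64 row words. */
void transpose_64x64(uint64_t r[64])
{
    static const uint64_t mask[6][2] = {
        {0x5555555555555555ULL, 0xAAAAAAAAAAAAAAAAULL},
        {0x3333333333333333ULL, 0xCCCCCCCCCCCCCCCCULL},
        {0x0F0F0F0F0F0F0F0FULL, 0xF0F0F0F0F0F0F0F0ULL},
        {0x00FF00FF00FF00FFULL, 0xFF00FF00FF00FF00ULL},
        {0x0000FFFF0000FFFFULL, 0xFFFF0000FFFF0000ULL},
        {0x00000000FFFFFFFFULL, 0xFFFFFFFF00000000ULL},
    };

    /* the assembly starts at the coarsest level (the MASK5 pair),
     * so the level index d runs 5 down to 0 */
    for (int d = 5; d >= 0; d--) {
        int s = 1 << d;  /* swap distance: 32, 16, 8, 4, 2, 1 */
        for (int i = 0; i < 64; i += 2 * s)
            for (int j = i; j < i + s; j++) {
                /* exchange the high half of row j with the low half of
                 * row j+s, s bits at a time (the vpand/vpsllq-vpslld-vpsllw/
                 * vpsrlq-vpsrld-vpsrlw/vpor groups in the assembly) */
                uint64_t lo = (r[j] & mask[d][0]) | ((r[j + s] & mask[d][0]) << s);
                uint64_t hi = ((r[j] & mask[d][1]) >> s) | (r[j + s] & mask[d][1]);
                r[j]     = lo;
                r[j + s] = hi;
            }
    }
}

The SSE version below performs the same butterfly on pairs of 64-bit rows packed two per xmm register (movddup broadcasts a row, pextrq extracts the transformed result), which is why the loads and stores walk the buffer with a 64-byte stride.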
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x64_asm CRYPTO_NAMESPACE(transpose_64x64_asm) #define _transpose_64x64_asm _CRYPTO_NAMESPACE(transpose_64x64_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 r0 # qhasm: reg128 r1 # qhasm: reg128 r2 # qhasm: reg128 r3 # qhasm: reg128 r4 # qhasm: reg128 r5 # qhasm: reg128 r6 # qhasm: reg128 r7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: int64 buf # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x64_asm .p2align 5 .global _transpose_64x64_asm .global transpose_64x64_asm _transpose_64x64_asm: transpose_64x64_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 64 ] x2 # asm 1: movddup 64(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 64(<input_0=%rdi),>r1=%xmm7 movddup 64( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 
128(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 128(<input_0=%rdi),>r2=%xmm8 movddup 128( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 192(<input_0=%rdi),>r3=%xmm9 movddup 192( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 256(<input_0=%rdi),>r4=%xmm10 movddup 256( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 320(<input_0=%rdi),>r5=%xmm11 movddup 320( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 384(<input_0=%rdi),>r6=%xmm12 movddup 384( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 448(<input_0=%rdi),>r7=%xmm13 movddup 448( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # 
qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 0 ] = buf # asm 1: movq <buf=int64#2,0(<input_0=int64#1) # asm 2: movq <buf=%rsi,0(<input_0=%rdi) movq % rsi, 0( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 64 ] = buf # asm 1: movq <buf=int64#2,64(<input_0=int64#1) # asm 2: movq <buf=%rsi,64(<input_0=%rdi) movq % rsi, 64( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 128 ] = buf # asm 1: movq <buf=int64#2,128(<input_0=int64#1) # asm 2: movq <buf=%rsi,128(<input_0=%rdi) movq % rsi, 128( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 192 ] = buf # asm 1: movq <buf=int64#2,192(<input_0=int64#1) # asm 2: movq <buf=%rsi,192(<input_0=%rdi) movq % rsi, 192( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 256 ] = buf # asm 1: movq <buf=int64#2,256(<input_0=int64#1) # asm 2: movq <buf=%rsi,256(<input_0=%rdi) movq % rsi, 256( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 320 ] = buf # asm 1: movq <buf=int64#2,320(<input_0=int64#1) # asm 2: movq <buf=%rsi,320(<input_0=%rdi) movq % rsi, 320( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi 
pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 384 ] = buf # asm 1: movq <buf=int64#2,384(<input_0=int64#1) # asm 2: movq <buf=%rsi,384(<input_0=%rdi) movq % rsi, 384( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 448 ] = buf # asm 1: movq <buf=int64#2,448(<input_0=int64#1) # asm 2: movq <buf=%rsi,448(<input_0=%rdi) movq % rsi, 448( % rdi) # qhasm: r0 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 8(<input_0=%rdi),>r0=%xmm6 movddup 8( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 72 ] x2 # asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7 movddup 72( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 136(<input_0=%rdi),>r2=%xmm8 movddup 136( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 200(<input_0=%rdi),>r3=%xmm9 movddup 200( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 264(<input_0=%rdi),>r4=%xmm10 movddup 264( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 328(<input_0=%rdi),>r5=%xmm11 movddup 328( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 392(<input_0=%rdi),>r6=%xmm12 movddup 392( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 456(<input_0=%rdi),>r7=%xmm13 movddup 456( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 
= v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 
vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 8 ] = buf # asm 1: movq <buf=int64#2,8(<input_0=int64#1) # asm 2: movq <buf=%rsi,8(<input_0=%rdi) movq % rsi, 8( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 72 ] = buf # asm 1: movq <buf=int64#2,72(<input_0=int64#1) # asm 2: movq <buf=%rsi,72(<input_0=%rdi) movq % rsi, 72( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 136 ] = buf # asm 1: movq <buf=int64#2,136(<input_0=int64#1) # asm 2: movq <buf=%rsi,136(<input_0=%rdi) movq % rsi, 136( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: 
mem64[ input_0 + 200 ] = buf # asm 1: movq <buf=int64#2,200(<input_0=int64#1) # asm 2: movq <buf=%rsi,200(<input_0=%rdi) movq % rsi, 200( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 264 ] = buf # asm 1: movq <buf=int64#2,264(<input_0=int64#1) # asm 2: movq <buf=%rsi,264(<input_0=%rdi) movq % rsi, 264( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 328 ] = buf # asm 1: movq <buf=int64#2,328(<input_0=int64#1) # asm 2: movq <buf=%rsi,328(<input_0=%rdi) movq % rsi, 328( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 392 ] = buf # asm 1: movq <buf=int64#2,392(<input_0=int64#1) # asm 2: movq <buf=%rsi,392(<input_0=%rdi) movq % rsi, 392( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 456 ] = buf # asm 1: movq <buf=int64#2,456(<input_0=int64#1) # asm 2: movq <buf=%rsi,456(<input_0=%rdi) movq % rsi, 456( % rdi) # qhasm: r0 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 16(<input_0=%rdi),>r0=%xmm6 movddup 16( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 80 ] x2 # asm 1: movddup 80(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 80(<input_0=%rdi),>r1=%xmm7 movddup 80( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 208(<input_0=%rdi),>r3=%xmm9 movddup 208( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 272(<input_0=%rdi),>r4=%xmm10 movddup 272( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 336(<input_0=%rdi),>r5=%xmm11 movddup 336( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 400(<input_0=%rdi),>r6=%xmm12 movddup 400( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 464(<input_0=%rdi),>r7=%xmm13 movddup 464( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor 
<v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 
# qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 
2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 16 ] = buf # 
asm 1: movq <buf=int64#2,16(<input_0=int64#1) # asm 2: movq <buf=%rsi,16(<input_0=%rdi) movq % rsi, 16( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 80 ] = buf # asm 1: movq <buf=int64#2,80(<input_0=int64#1) # asm 2: movq <buf=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 144 ] = buf # asm 1: movq <buf=int64#2,144(<input_0=int64#1) # asm 2: movq <buf=%rsi,144(<input_0=%rdi) movq % rsi, 144( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 208 ] = buf # asm 1: movq <buf=int64#2,208(<input_0=int64#1) # asm 2: movq <buf=%rsi,208(<input_0=%rdi) movq % rsi, 208( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 272 ] = buf # asm 1: movq <buf=int64#2,272(<input_0=int64#1) # asm 2: movq <buf=%rsi,272(<input_0=%rdi) movq % rsi, 272( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 336 ] = buf # asm 1: movq <buf=int64#2,336(<input_0=int64#1) # asm 2: movq <buf=%rsi,336(<input_0=%rdi) movq % rsi, 336( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 400 ] = buf # asm 1: movq <buf=int64#2,400(<input_0=int64#1) # asm 2: movq <buf=%rsi,400(<input_0=%rdi) movq % rsi, 400( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 464 ] = buf # asm 1: movq <buf=int64#2,464(<input_0=int64#1) # asm 2: movq <buf=%rsi,464(<input_0=%rdi) movq % rsi, 464( % rdi) # qhasm: r0 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 24(<input_0=%rdi),>r0=%xmm6 movddup 24( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 88 ] x2 # asm 1: movddup 88(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 88(<input_0=%rdi),>r1=%xmm7 movddup 88( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 152(<input_0=%rdi),>r2=%xmm8 movddup 152( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 280(<input_0=%rdi),>r4=%xmm10 movddup 280( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 344(<input_0=%rdi),>r5=%xmm11 movddup 344( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 408(<input_0=%rdi),>r6=%xmm12 movddup 408( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 472(<input_0=%rdi),>r7=%xmm13 movddup 472( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 

movddup 24(%rdi),%xmm6     # r0 = mem64[ input_0 + 24 ] x2
movddup 88(%rdi),%xmm7     # r1 = mem64[ input_0 + 88 ] x2
movddup 152(%rdi),%xmm8    # r2 = mem64[ input_0 + 152 ] x2
movddup 216(%rdi),%xmm9    # r3 = mem64[ input_0 + 216 ] x2
movddup 280(%rdi),%xmm10   # r4 = mem64[ input_0 + 280 ] x2
movddup 344(%rdi),%xmm11   # r5 = mem64[ input_0 + 344 ] x2
movddup 408(%rdi),%xmm12   # r6 = mem64[ input_0 + 408 ] x2
movddup 472(%rdi),%xmm13   # r7 = mem64[ input_0 + 472 ] x2

vpand %xmm0,%xmm6,%xmm14   # v00 = r0 & mask0
vpsllq $32,%xmm10,%xmm15   # 2x v10 = r4 << 32
vpsrlq $32,%xmm6,%xmm6     # 2x v01 = r0 unsigned>> 32
vpand %xmm1,%xmm10,%xmm10  # v11 = r4 & mask1
vpor %xmm15,%xmm14,%xmm14  # r0 = v00 | v10
vpor %xmm10,%xmm6,%xmm6    # r4 = v01 | v11
vpand %xmm0,%xmm7,%xmm10   # v00 = r1 & mask0
vpsllq $32,%xmm11,%xmm15   # 2x v10 = r5 << 32
vpsrlq $32,%xmm7,%xmm7     # 2x v01 = r1 unsigned>> 32
vpand %xmm1,%xmm11,%xmm11  # v11 = r5 & mask1
vpor %xmm15,%xmm10,%xmm10  # r1 = v00 | v10
vpor %xmm11,%xmm7,%xmm7    # r5 = v01 | v11
vpand %xmm0,%xmm8,%xmm11   # v00 = r2 & mask0
vpsllq $32,%xmm12,%xmm15   # 2x v10 = r6 << 32
vpsrlq $32,%xmm8,%xmm8     # 2x v01 = r2 unsigned>> 32
vpand %xmm1,%xmm12,%xmm12  # v11 = r6 & mask1
vpor %xmm15,%xmm11,%xmm11  # r2 = v00 | v10
vpor %xmm12,%xmm8,%xmm8    # r6 = v01 | v11
vpand %xmm0,%xmm9,%xmm12   # v00 = r3 & mask0
vpsllq $32,%xmm13,%xmm15   # 2x v10 = r7 << 32
vpsrlq $32,%xmm9,%xmm9     # 2x v01 = r3 unsigned>> 32
vpand %xmm1,%xmm13,%xmm13  # v11 = r7 & mask1
vpor %xmm15,%xmm12,%xmm12  # r3 = v00 | v10
vpor %xmm13,%xmm9,%xmm9    # r7 = v01 | v11

vpand %xmm2,%xmm14,%xmm13  # v00 = r0 & mask2
vpslld $16,%xmm11,%xmm15   # 4x v10 = r2 << 16
vpsrld $16,%xmm14,%xmm14   # 4x v01 = r0 unsigned>> 16
vpand %xmm3,%xmm11,%xmm11  # v11 = r2 & mask3
vpor %xmm15,%xmm13,%xmm13  # r0 = v00 | v10
vpor %xmm11,%xmm14,%xmm11  # r2 = v01 | v11
vpand %xmm2,%xmm10,%xmm14  # v00 = r1 & mask2
vpslld $16,%xmm12,%xmm15   # 4x v10 = r3 << 16
vpsrld $16,%xmm10,%xmm10   # 4x v01 = r1 unsigned>> 16
vpand %xmm3,%xmm12,%xmm12  # v11 = r3 & mask3
vpor %xmm15,%xmm14,%xmm14  # r1 = v00 | v10
vpor %xmm12,%xmm10,%xmm10  # r3 = v01 | v11
vpand %xmm2,%xmm6,%xmm12   # v00 = r4 & mask2
vpslld $16,%xmm8,%xmm15    # 4x v10 = r6 << 16
vpsrld $16,%xmm6,%xmm6     # 4x v01 = r4 unsigned>> 16
vpand %xmm3,%xmm8,%xmm8    # v11 = r6 & mask3
vpor %xmm15,%xmm12,%xmm12  # r4 = v00 | v10
vpor %xmm8,%xmm6,%xmm6     # r6 = v01 | v11
vpand %xmm2,%xmm7,%xmm8    # v00 = r5 & mask2
vpslld $16,%xmm9,%xmm15    # 4x v10 = r7 << 16
vpsrld $16,%xmm7,%xmm7     # 4x v01 = r5 unsigned>> 16
vpand %xmm3,%xmm9,%xmm9    # v11 = r7 & mask3
vpor %xmm15,%xmm8,%xmm8    # r5 = v00 | v10
vpor %xmm9,%xmm7,%xmm7     # r7 = v01 | v11

vpand %xmm4,%xmm13,%xmm9   # v00 = r0 & mask4
vpsllw $8,%xmm14,%xmm15    # 8x v10 = r1 << 8
vpsrlw $8,%xmm13,%xmm13    # 8x v01 = r0 unsigned>> 8
vpand %xmm5,%xmm14,%xmm14  # v11 = r1 & mask5
vpor %xmm15,%xmm9,%xmm9    # r0 = v00 | v10
vpor %xmm14,%xmm13,%xmm13  # r1 = v01 | v11
vpand %xmm4,%xmm11,%xmm14  # v00 = r2 & mask4
vpsllw $8,%xmm10,%xmm15    # 8x v10 = r3 << 8
vpsrlw $8,%xmm11,%xmm11    # 8x v01 = r2 unsigned>> 8
vpand %xmm5,%xmm10,%xmm10  # v11 = r3 & mask5
vpor %xmm15,%xmm14,%xmm14  # r2 = v00 | v10
vpor %xmm10,%xmm11,%xmm10  # r3 = v01 | v11
vpand %xmm4,%xmm12,%xmm11  # v00 = r4 & mask4
vpsllw $8,%xmm8,%xmm15     # 8x v10 = r5 << 8
vpsrlw $8,%xmm12,%xmm12    # 8x v01 = r4 unsigned>> 8
vpand %xmm5,%xmm8,%xmm8    # v11 = r5 & mask5
vpor %xmm15,%xmm11,%xmm11  # r4 = v00 | v10
vpor %xmm8,%xmm12,%xmm8    # r5 = v01 | v11
vpand %xmm4,%xmm6,%xmm12   # v00 = r6 & mask4
vpsllw $8,%xmm7,%xmm15     # 8x v10 = r7 << 8
vpsrlw $8,%xmm6,%xmm6      # 8x v01 = r6 unsigned>> 8
vpand %xmm5,%xmm7,%xmm7    # v11 = r7 & mask5
vpor %xmm15,%xmm12,%xmm12  # r6 = v00 | v10
vpor %xmm7,%xmm6,%xmm6     # r7 = v01 | v11

pextrq $0x0,%xmm9,%rsi     # buf = r0[0]
movq %rsi,24(%rdi)         # mem64[ input_0 + 24 ] = buf
pextrq $0x0,%xmm13,%rsi    # buf = r1[0]
movq %rsi,88(%rdi)         # mem64[ input_0 + 88 ] = buf
pextrq $0x0,%xmm14,%rsi    # buf = r2[0]
movq %rsi,152(%rdi)        # mem64[ input_0 + 152 ] = buf
pextrq $0x0,%xmm10,%rsi    # buf = r3[0]
movq %rsi,216(%rdi)        # mem64[ input_0 + 216 ] = buf
pextrq $0x0,%xmm11,%rsi    # buf = r4[0]
movq %rsi,280(%rdi)        # mem64[ input_0 + 280 ] = buf
pextrq $0x0,%xmm8,%rsi     # buf = r5[0]
movq %rsi,344(%rdi)        # mem64[ input_0 + 344 ] = buf
pextrq $0x0,%xmm12,%rsi    # buf = r6[0]
movq %rsi,408(%rdi)        # mem64[ input_0 + 408 ] = buf
pextrq $0x0,%xmm6,%rsi     # buf = r7[0]
movq %rsi,472(%rdi)        # mem64[ input_0 + 472 ] = buf
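
# Access pattern: each pass loads eight quadwords spaced 64 bytes
# apart, in effect one 8x8-byte tile of the matrix, and the window
# advances 8 bytes per pass (offsets 16, 24, 32, ... in this stretch).
# movddup broadcasts the low 64-bit lane into both lanes and pextrq
# $0x0 writes lane 0 back out, so only the low half of each xmm
# register carries live data here.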

movddup 32(%rdi),%xmm6     # r0 = mem64[ input_0 + 32 ] x2
movddup 96(%rdi),%xmm7     # r1 = mem64[ input_0 + 96 ] x2
movddup 160(%rdi),%xmm8    # r2 = mem64[ input_0 + 160 ] x2
movddup 224(%rdi),%xmm9    # r3 = mem64[ input_0 + 224 ] x2
movddup 288(%rdi),%xmm10   # r4 = mem64[ input_0 + 288 ] x2
movddup 352(%rdi),%xmm11   # r5 = mem64[ input_0 + 352 ] x2
movddup 416(%rdi),%xmm12   # r6 = mem64[ input_0 + 416 ] x2
movddup 480(%rdi),%xmm13   # r7 = mem64[ input_0 + 480 ] x2

vpand %xmm0,%xmm6,%xmm14   # v00 = r0 & mask0
vpsllq $32,%xmm10,%xmm15   # 2x v10 = r4 << 32
vpsrlq $32,%xmm6,%xmm6     # 2x v01 = r0 unsigned>> 32
vpand %xmm1,%xmm10,%xmm10  # v11 = r4 & mask1
vpor %xmm15,%xmm14,%xmm14  # r0 = v00 | v10
vpor %xmm10,%xmm6,%xmm6    # r4 = v01 | v11
vpand %xmm0,%xmm7,%xmm10   # v00 = r1 & mask0
vpsllq $32,%xmm11,%xmm15   # 2x v10 = r5 << 32
vpsrlq $32,%xmm7,%xmm7     # 2x v01 = r1 unsigned>> 32
vpand %xmm1,%xmm11,%xmm11  # v11 = r5 & mask1
vpor %xmm15,%xmm10,%xmm10  # r1 = v00 | v10
vpor %xmm11,%xmm7,%xmm7    # r5 = v01 | v11
vpand %xmm0,%xmm8,%xmm11   # v00 = r2 & mask0
vpsllq $32,%xmm12,%xmm15   # 2x v10 = r6 << 32
vpsrlq $32,%xmm8,%xmm8     # 2x v01 = r2 unsigned>> 32
vpand %xmm1,%xmm12,%xmm12  # v11 = r6 & mask1
vpor %xmm15,%xmm11,%xmm11  # r2 = v00 | v10
vpor %xmm12,%xmm8,%xmm8    # r6 = v01 | v11
vpand %xmm0,%xmm9,%xmm12   # v00 = r3 & mask0
vpsllq $32,%xmm13,%xmm15   # 2x v10 = r7 << 32
vpsrlq $32,%xmm9,%xmm9     # 2x v01 = r3 unsigned>> 32
vpand %xmm1,%xmm13,%xmm13  # v11 = r7 & mask1
vpor %xmm15,%xmm12,%xmm12  # r3 = v00 | v10
vpor %xmm13,%xmm9,%xmm9    # r7 = v01 | v11

vpand %xmm2,%xmm14,%xmm13  # v00 = r0 & mask2
vpslld $16,%xmm11,%xmm15   # 4x v10 = r2 << 16
vpsrld $16,%xmm14,%xmm14   # 4x v01 = r0 unsigned>> 16
vpand %xmm3,%xmm11,%xmm11  # v11 = r2 & mask3
vpor %xmm15,%xmm13,%xmm13  # r0 = v00 | v10
vpor %xmm11,%xmm14,%xmm11  # r2 = v01 | v11
vpand %xmm2,%xmm10,%xmm14  # v00 = r1 & mask2
vpslld $16,%xmm12,%xmm15   # 4x v10 = r3 << 16
vpsrld $16,%xmm10,%xmm10   # 4x v01 = r1 unsigned>> 16
vpand %xmm3,%xmm12,%xmm12  # v11 = r3 & mask3
vpor %xmm15,%xmm14,%xmm14  # r1 = v00 | v10
vpor %xmm12,%xmm10,%xmm10  # r3 = v01 | v11
vpand %xmm2,%xmm6,%xmm12   # v00 = r4 & mask2
vpslld $16,%xmm8,%xmm15    # 4x v10 = r6 << 16
vpsrld $16,%xmm6,%xmm6     # 4x v01 = r4 unsigned>> 16
vpand %xmm3,%xmm8,%xmm8    # v11 = r6 & mask3
vpor %xmm15,%xmm12,%xmm12  # r4 = v00 | v10
vpor %xmm8,%xmm6,%xmm6     # r6 = v01 | v11
vpand %xmm2,%xmm7,%xmm8    # v00 = r5 & mask2
vpslld $16,%xmm9,%xmm15    # 4x v10 = r7 << 16
vpsrld $16,%xmm7,%xmm7     # 4x v01 = r5 unsigned>> 16
vpand %xmm3,%xmm9,%xmm9    # v11 = r7 & mask3
vpor %xmm15,%xmm8,%xmm8    # r5 = v00 | v10
vpor %xmm9,%xmm7,%xmm7     # r7 = v01 | v11

vpand %xmm4,%xmm13,%xmm9   # v00 = r0 & mask4
vpsllw $8,%xmm14,%xmm15    # 8x v10 = r1 << 8
vpsrlw $8,%xmm13,%xmm13    # 8x v01 = r0 unsigned>> 8
vpand %xmm5,%xmm14,%xmm14  # v11 = r1 & mask5
vpor %xmm15,%xmm9,%xmm9    # r0 = v00 | v10
vpor %xmm14,%xmm13,%xmm13  # r1 = v01 | v11
vpand %xmm4,%xmm11,%xmm14  # v00 = r2 & mask4
vpsllw $8,%xmm10,%xmm15    # 8x v10 = r3 << 8
vpsrlw $8,%xmm11,%xmm11    # 8x v01 = r2 unsigned>> 8
vpand %xmm5,%xmm10,%xmm10  # v11 = r3 & mask5
vpor %xmm15,%xmm14,%xmm14  # r2 = v00 | v10
vpor %xmm10,%xmm11,%xmm10  # r3 = v01 | v11
vpand %xmm4,%xmm12,%xmm11  # v00 = r4 & mask4
vpsllw $8,%xmm8,%xmm15     # 8x v10 = r5 << 8
vpsrlw $8,%xmm12,%xmm12    # 8x v01 = r4 unsigned>> 8
vpand %xmm5,%xmm8,%xmm8    # v11 = r5 & mask5
vpor %xmm15,%xmm11,%xmm11  # r4 = v00 | v10
vpor %xmm8,%xmm12,%xmm8    # r5 = v01 | v11
vpand %xmm4,%xmm6,%xmm12   # v00 = r6 & mask4
vpsllw $8,%xmm7,%xmm15     # 8x v10 = r7 << 8
vpsrlw $8,%xmm6,%xmm6      # 8x v01 = r6 unsigned>> 8
vpand %xmm5,%xmm7,%xmm7    # v11 = r7 & mask5
vpor %xmm15,%xmm12,%xmm12  # r6 = v00 | v10
vpor %xmm7,%xmm6,%xmm6     # r7 = v01 | v11

pextrq $0x0,%xmm9,%rsi     # buf = r0[0]
movq %rsi,32(%rdi)         # mem64[ input_0 + 32 ] = buf
pextrq $0x0,%xmm13,%rsi    # buf = r1[0]
movq %rsi,96(%rdi)         # mem64[ input_0 + 96 ] = buf
pextrq $0x0,%xmm14,%rsi    # buf = r2[0]
movq %rsi,160(%rdi)        # mem64[ input_0 + 160 ] = buf
pextrq $0x0,%xmm10,%rsi    # buf = r3[0]
movq %rsi,224(%rdi)        # mem64[ input_0 + 224 ] = buf
pextrq $0x0,%xmm11,%rsi    # buf = r4[0]
movq %rsi,288(%rdi)        # mem64[ input_0 + 288 ] = buf
pextrq $0x0,%xmm8,%rsi     # buf = r5[0]
movq %rsi,352(%rdi)        # mem64[ input_0 + 352 ] = buf
pextrq $0x0,%xmm12,%rsi    # buf = r6[0]
movq %rsi,416(%rdi)        # mem64[ input_0 + 416 ] = buf
pextrq $0x0,%xmm6,%rsi     # buf = r7[0]
movq %rsi,480(%rdi)        # mem64[ input_0 + 480 ] = buf

movddup 40(%rdi),%xmm6     # r0 = mem64[ input_0 + 40 ] x2
movddup 104(%rdi),%xmm7    # r1 = mem64[ input_0 + 104 ] x2
movddup 168(%rdi),%xmm8    # r2 = mem64[ input_0 + 168 ] x2
movddup 232(%rdi),%xmm9    # r3 = mem64[ input_0 + 232 ] x2
movddup 296(%rdi),%xmm10   # r4 = mem64[ input_0 + 296 ] x2
movddup 360(%rdi),%xmm11   # r5 = mem64[ input_0 + 360 ] x2
movddup 424(%rdi),%xmm12   # r6 = mem64[ input_0 + 424 ] x2
movddup 488(%rdi),%xmm13   # r7 = mem64[ input_0 + 488 ] x2

vpand %xmm0,%xmm6,%xmm14   # v00 = r0 & mask0
vpsllq $32,%xmm10,%xmm15   # 2x v10 = r4 << 32
vpsrlq $32,%xmm6,%xmm6     # 2x v01 = r0 unsigned>> 32
vpand %xmm1,%xmm10,%xmm10  # v11 = r4 & mask1
vpor %xmm15,%xmm14,%xmm14  # r0 = v00 | v10
vpor %xmm10,%xmm6,%xmm6    # r4 = v01 | v11
vpand %xmm0,%xmm7,%xmm10   # v00 = r1 & mask0
vpsllq $32,%xmm11,%xmm15   # 2x v10 = r5 << 32
vpsrlq $32,%xmm7,%xmm7     # 2x v01 = r1 unsigned>> 32
vpand %xmm1,%xmm11,%xmm11  # v11 = r5 & mask1
vpor %xmm15,%xmm10,%xmm10  # r1 = v00 | v10
vpor %xmm11,%xmm7,%xmm7    # r5 = v01 | v11
vpand %xmm0,%xmm8,%xmm11   # v00 = r2 & mask0
vpsllq $32,%xmm12,%xmm15   # 2x v10 = r6 << 32
vpsrlq $32,%xmm8,%xmm8     # 2x v01 = r2 unsigned>> 32
vpand %xmm1,%xmm12,%xmm12  # v11 = r6 & mask1
vpor %xmm15,%xmm11,%xmm11  # r2 = v00 | v10
vpor %xmm12,%xmm8,%xmm8    # r6 = v01 | v11
vpand %xmm0,%xmm9,%xmm12   # v00 = r3 & mask0
vpsllq $32,%xmm13,%xmm15   # 2x v10 = r7 << 32
vpsrlq $32,%xmm9,%xmm9     # 2x v01 = r3 unsigned>> 32
vpand %xmm1,%xmm13,%xmm13  # v11 = r7 & mask1
vpor %xmm15,%xmm12,%xmm12  # r3 = v00 | v10
vpor %xmm13,%xmm9,%xmm9    # r7 = v01 | v11

vpand %xmm2,%xmm14,%xmm13  # v00 = r0 & mask2
vpslld $16,%xmm11,%xmm15   # 4x v10 = r2 << 16
vpsrld $16,%xmm14,%xmm14   # 4x v01 = r0 unsigned>> 16
vpand %xmm3,%xmm11,%xmm11  # v11 = r2 & mask3
vpor %xmm15,%xmm13,%xmm13  # r0 = v00 | v10
vpor %xmm11,%xmm14,%xmm11  # r2 = v01 | v11
vpand %xmm2,%xmm10,%xmm14  # v00 = r1 & mask2
vpslld $16,%xmm12,%xmm15   # 4x v10 = r3 << 16
vpsrld $16,%xmm10,%xmm10   # 4x v01 = r1 unsigned>> 16
vpand %xmm3,%xmm12,%xmm12  # v11 = r3 & mask3
vpor %xmm15,%xmm14,%xmm14  # r1 = v00 | v10
vpor %xmm12,%xmm10,%xmm10  # r3 = v01 | v11
vpand %xmm2,%xmm6,%xmm12   # v00 = r4 & mask2
vpslld $16,%xmm8,%xmm15    # 4x v10 = r6 << 16
vpsrld $16,%xmm6,%xmm6     # 4x v01 = r4 unsigned>> 16
vpand %xmm3,%xmm8,%xmm8    # v11 = r6 & mask3
vpor %xmm15,%xmm12,%xmm12  # r4 = v00 | v10
vpor %xmm8,%xmm6,%xmm6     # r6 = v01 | v11
vpand %xmm2,%xmm7,%xmm8    # v00 = r5 & mask2
vpslld $16,%xmm9,%xmm15    # 4x v10 = r7 << 16
vpsrld $16,%xmm7,%xmm7     # 4x v01 = r5 unsigned>> 16
vpand %xmm3,%xmm9,%xmm9    # v11 = r7 & mask3
vpor %xmm15,%xmm8,%xmm8    # r5 = v00 | v10
vpor %xmm9,%xmm7,%xmm7     # r7 = v01 | v11

vpand %xmm4,%xmm13,%xmm9   # v00 = r0 & mask4
vpsllw $8,%xmm14,%xmm15    # 8x v10 = r1 << 8
vpsrlw $8,%xmm13,%xmm13    # 8x v01 = r0 unsigned>> 8
vpand %xmm5,%xmm14,%xmm14  # v11 = r1 & mask5
vpor %xmm15,%xmm9,%xmm9    # r0 = v00 | v10
vpor %xmm14,%xmm13,%xmm13  # r1 = v01 | v11
vpand %xmm4,%xmm11,%xmm14  # v00 = r2 & mask4
vpsllw $8,%xmm10,%xmm15    # 8x v10 = r3 << 8
vpsrlw $8,%xmm11,%xmm11    # 8x v01 = r2 unsigned>> 8
vpand %xmm5,%xmm10,%xmm10  # v11 = r3 & mask5
vpor %xmm15,%xmm14,%xmm14  # r2 = v00 | v10
vpor %xmm10,%xmm11,%xmm10  # r3 = v01 | v11
vpand %xmm4,%xmm12,%xmm11  # v00 = r4 & mask4
vpsllw $8,%xmm8,%xmm15     # 8x v10 = r5 << 8
vpsrlw $8,%xmm12,%xmm12    # 8x v01 = r4 unsigned>> 8
vpand %xmm5,%xmm8,%xmm8    # v11 = r5 & mask5
vpor %xmm15,%xmm11,%xmm11  # r4 = v00 | v10
vpor %xmm8,%xmm12,%xmm8    # r5 = v01 | v11
vpand %xmm4,%xmm6,%xmm12   # v00 = r6 & mask4
vpsllw $8,%xmm7,%xmm15     # 8x v10 = r7 << 8
vpsrlw $8,%xmm6,%xmm6      # 8x v01 = r6 unsigned>> 8
vpand %xmm5,%xmm7,%xmm7    # v11 = r7 & mask5
vpor %xmm15,%xmm12,%xmm12  # r6 = v00 | v10
vpor %xmm7,%xmm6,%xmm6     # r7 = v01 | v11

pextrq $0x0,%xmm9,%rsi     # buf = r0[0]
movq %rsi,40(%rdi)         # mem64[ input_0 + 40 ] = buf
pextrq $0x0,%xmm13,%rsi    # buf = r1[0]
movq %rsi,104(%rdi)        # mem64[ input_0 + 104 ] = buf
pextrq $0x0,%xmm14,%rsi    # buf = r2[0]
movq %rsi,168(%rdi)        # mem64[ input_0 + 168 ] = buf
pextrq $0x0,%xmm10,%rsi    # buf = r3[0]
movq %rsi,232(%rdi)        # mem64[ input_0 + 232 ] = buf
pextrq $0x0,%xmm11,%rsi    # buf = r4[0]
movq %rsi,296(%rdi)        # mem64[ input_0 + 296 ] = buf
pextrq $0x0,%xmm8,%rsi     # buf = r5[0]
movq %rsi,360(%rdi)        # mem64[ input_0 + 360 ] = buf
pextrq $0x0,%xmm12,%rsi    # buf = r6[0]
movq %rsi,424(%rdi)        # mem64[ input_0 + 424 ] = buf
pextrq $0x0,%xmm6,%rsi     # buf = r7[0]
movq %rsi,488(%rdi)        # mem64[ input_0 + 488 ] = buf

movddup 48(%rdi),%xmm6     # r0 = mem64[ input_0 + 48 ] x2
movddup 112(%rdi),%xmm7    # r1 = mem64[ input_0 + 112 ] x2
movddup 176(%rdi),%xmm8    # r2 = mem64[ input_0 + 176 ] x2
movddup 240(%rdi),%xmm9    # r3 = mem64[ input_0 + 240 ] x2
movddup 304(%rdi),%xmm10   # r4 = mem64[ input_0 + 304 ] x2
movddup 368(%rdi),%xmm11   # r5 = mem64[ input_0 + 368 ] x2
movddup 432(%rdi),%xmm12   # r6 = mem64[ input_0 + 432 ] x2
movddup 496(%rdi),%xmm13   # r7 = mem64[ input_0 + 496 ] x2

vpand %xmm0,%xmm6,%xmm14   # v00 = r0 & mask0
vpsllq $32,%xmm10,%xmm15   # 2x v10 = r4 << 32
vpsrlq $32,%xmm6,%xmm6     # 2x v01 = r0 unsigned>> 32
vpand %xmm1,%xmm10,%xmm10  # v11 = r4 & mask1
vpor %xmm15,%xmm14,%xmm14  # r0 = v00 | v10
vpor %xmm10,%xmm6,%xmm6    # r4 = v01 | v11
vpand %xmm0,%xmm7,%xmm10   # v00 = r1 & mask0
vpsllq $32,%xmm11,%xmm15   # 2x v10 = r5 << 32
vpsrlq $32,%xmm7,%xmm7     # 2x v01 = r1 unsigned>> 32
vpand %xmm1,%xmm11,%xmm11  # v11 = r5 & mask1
vpor %xmm15,%xmm10,%xmm10  # r1 = v00 | v10
vpor %xmm11,%xmm7,%xmm7    # r5 = v01 | v11
vpand %xmm0,%xmm8,%xmm11   # v00 = r2 & mask0
vpsllq $32,%xmm12,%xmm15   # 2x v10 = r6 << 32
vpsrlq $32,%xmm8,%xmm8     # 2x v01 = r2 unsigned>> 32
vpand %xmm1,%xmm12,%xmm12  # v11 = r6 & mask1
vpor %xmm15,%xmm11,%xmm11  # r2 = v00 | v10
vpor %xmm12,%xmm8,%xmm8    # r6 = v01 | v11
vpand %xmm0,%xmm9,%xmm12   # v00 = r3 & mask0
vpsllq $32,%xmm13,%xmm15   # 2x v10 = r7 << 32
vpsrlq $32,%xmm9,%xmm9     # 2x v01 = r3 unsigned>> 32
vpand %xmm1,%xmm13,%xmm13  # v11 = r7 & mask1
vpor %xmm15,%xmm12,%xmm12  # r3 = v00 | v10
vpor %xmm13,%xmm9,%xmm9    # r7 = v01 | v11

vpand %xmm2,%xmm14,%xmm13  # v00 = r0 & mask2
vpslld $16,%xmm11,%xmm15   # 4x v10 = r2 << 16
vpsrld $16,%xmm14,%xmm14   # 4x v01 = r0 unsigned>> 16
vpand %xmm3,%xmm11,%xmm11  # v11 = r2 & mask3
vpor %xmm15,%xmm13,%xmm13  # r0 = v00 | v10
vpor %xmm11,%xmm14,%xmm11  # r2 = v01 | v11
vpand %xmm2,%xmm10,%xmm14  # v00 = r1 & mask2
vpslld $16,%xmm12,%xmm15   # 4x v10 = r3 << 16
vpsrld $16,%xmm10,%xmm10   # 4x v01 = r1 unsigned>> 16
vpand %xmm3,%xmm12,%xmm12  # v11 = r3 & mask3
vpor %xmm15,%xmm14,%xmm14  # r1 = v00 | v10
vpor %xmm12,%xmm10,%xmm10  # r3 = v01 | v11
vpand %xmm2,%xmm6,%xmm12   # v00 = r4 & mask2
vpslld $16,%xmm8,%xmm15    # 4x v10 = r6 << 16
vpsrld $16,%xmm6,%xmm6     # 4x v01 = r4 unsigned>> 16
vpand %xmm3,%xmm8,%xmm8    # v11 = r6 & mask3
vpor %xmm15,%xmm12,%xmm12  # r4 = v00 | v10
vpor %xmm8,%xmm6,%xmm6     # r6 = v01 | v11
vpand %xmm2,%xmm7,%xmm8    # v00 = r5 & mask2
vpslld $16,%xmm9,%xmm15    # 4x v10 = r7 << 16
vpsrld $16,%xmm7,%xmm7     # 4x v01 = r5 unsigned>> 16
vpand %xmm3,%xmm9,%xmm9    # v11 = r7 & mask3
vpor %xmm15,%xmm8,%xmm8    # r5 = v00 | v10
vpor %xmm9,%xmm7,%xmm7     # r7 = v01 | v11

vpand %xmm4,%xmm13,%xmm9   # v00 = r0 & mask4
vpsllw $8,%xmm14,%xmm15    # 8x v10 = r1 << 8
vpsrlw $8,%xmm13,%xmm13    # 8x v01 = r0 unsigned>> 8
vpand %xmm5,%xmm14,%xmm14  # v11 = r1 & mask5
vpor %xmm15,%xmm9,%xmm9    # r0 = v00 | v10
vpor %xmm14,%xmm13,%xmm13  # r1 = v01 | v11
vpand %xmm4,%xmm11,%xmm14  # v00 = r2 & mask4
vpsllw $8,%xmm10,%xmm15    # 8x v10 = r3 << 8
vpsrlw $8,%xmm11,%xmm11    # 8x v01 = r2 unsigned>> 8
vpand %xmm5,%xmm10,%xmm10  # v11 = r3 & mask5
vpor %xmm15,%xmm14,%xmm14  # r2 = v00 | v10
vpor %xmm10,%xmm11,%xmm10  # r3 = v01 | v11
vpand %xmm4,%xmm12,%xmm11  # v00 = r4 & mask4
vpsllw $8,%xmm8,%xmm15     # 8x v10 = r5 << 8
vpsrlw $8,%xmm12,%xmm12    # 8x v01 = r4 unsigned>> 8
vpand %xmm5,%xmm8,%xmm8    # v11 = r5 & mask5
vpor %xmm15,%xmm11,%xmm11  # r4 = v00 | v10
vpor %xmm8,%xmm12,%xmm8    # r5 = v01 | v11
vpand %xmm4,%xmm6,%xmm12   # v00 = r6 & mask4
vpsllw $8,%xmm7,%xmm15     # 8x v10 = r7 << 8
vpsrlw $8,%xmm6,%xmm6      # 8x v01 = r6 unsigned>> 8
vpand %xmm5,%xmm7,%xmm7    # v11 = r7 & mask5
vpor %xmm15,%xmm12,%xmm12  # r6 = v00 | v10
vpor %xmm7,%xmm6,%xmm6     # r7 = v01 | v11

pextrq $0x0,%xmm9,%rsi     # buf = r0[0]
movq %rsi,48(%rdi)         # mem64[ input_0 + 48 ] = buf
pextrq $0x0,%xmm13,%rsi    # buf = r1[0]
movq %rsi,112(%rdi)        # mem64[ input_0 + 112 ] = buf
pextrq $0x0,%xmm14,%rsi    # buf = r2[0]
movq %rsi,176(%rdi)        # mem64[ input_0 + 176 ] = buf
pextrq $0x0,%xmm10,%rsi    # buf = r3[0]
movq %rsi,240(%rdi)        # mem64[ input_0 + 240 ] = buf
pextrq $0x0,%xmm11,%rsi    # buf = r4[0]
movq %rsi,304(%rdi)        # mem64[ input_0 + 304 ] = buf
pextrq $0x0,%xmm8,%rsi     # buf = r5[0]
movq %rsi,368(%rdi)        # mem64[ input_0 + 368 ] = buf
pextrq $0x0,%xmm12,%rsi    # buf = r6[0]
movq %rsi,432(%rdi)        # mem64[ input_0 + 432 ] = buf
pextrq $0x0,%xmm6,%rsi     # buf = r7[0]
movq %rsi,496(%rdi)        # mem64[ input_0 + 496 ] = buf
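
# Last column of the sweep: mask0 and mask1 have no further use after
# the 32-bit stage below, so the register allocator reuses xmm0 and
# xmm1 as destinations in the final (r3, r7) swap instead of fresh
# temporaries.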
$32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#1 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm0 vpand % xmm0, % xmm9, % xmm0 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#13 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm12 vpsllq $32, % xmm13, % xmm12 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#1,>r3=reg128#1 # asm 2: vpor <v10=%xmm12,<v00=%xmm0,>r3=%xmm0 vpor % xmm12, % xmm0, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor 
% xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#13 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm12 vpslld $16, % xmm11, % xmm12 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#14 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm13 vpsrld $16, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#1,>v10=reg128#14 # asm 2: vpslld $16,<r3=%xmm0,>v10=%xmm13 vpslld $16, % xmm0, % xmm13 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#14 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm13 vpslld $16, % xmm8, % xmm13 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#3 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm2 vpand % xmm2, % xmm7, % xmm2 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#2,>v10=reg128#9 # asm 2: vpslld $16,<r7=%xmm1,>v10=%xmm8 vpslld $16, % xmm1, % xmm8 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: r5 = v00 | v10 # asm 1: vpor 
<v10=reg128#9,<v00=reg128#3,>r5=reg128#3 # asm 2: vpor <v10=%xmm8,<v00=%xmm2,>r5=%xmm2 vpor % xmm8, % xmm2, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#13,>v10=reg128#8 # asm 2: vpsllw $8,<r1=%xmm12,>v10=%xmm7 vpsllw $8, % xmm12, % xmm7 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#10,>v01=reg128#9 # asm 2: vpsrlw $8,<r0=%xmm9,>v01=%xmm8 vpsrlw $8, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#1,>v10=reg128#10 # asm 2: vpsllw $8,<r3=%xmm0,>v10=%xmm9 vpsllw $8, % xmm0, % xmm9 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#3,>v10=reg128#12 # asm 2: vpsllw $8,<r5=%xmm2,>v10=%xmm11 vpsllw $8, % xmm2, % xmm11 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#11,>v01=reg128#11 # asm 2: vpsrlw $8,<r4=%xmm10,>v01=%xmm10 vpsrlw $8, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#5 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm4 vpand % xmm4, % xmm6, % xmm4 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#2,>v10=reg128#11 # asm 2: vpsllw $8,<r7=%xmm1,>v10=%xmm10 vpsllw $8, % xmm1, % xmm10 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand 
<mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#11,<v00=reg128#5,>r6=reg128#5 # asm 2: vpor <v10=%xmm10,<v00=%xmm4,>r6=%xmm4 vpor % xmm10, % xmm4, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#4,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm3,>buf=%rsi pextrq $0x0, % xmm3, % rsi # qhasm: mem64[ input_0 + 56 ] = buf # asm 1: movq <buf=int64#2,56(<input_0=int64#1) # asm 2: movq <buf=%rsi,56(<input_0=%rdi) movq % rsi, 56( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#8,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm7,>buf=%rsi pextrq $0x0, % xmm7, % rsi # qhasm: mem64[ input_0 + 120 ] = buf # asm 1: movq <buf=int64#2,120(<input_0=int64#1) # asm 2: movq <buf=%rsi,120(<input_0=%rdi) movq % rsi, 120( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 184 ] = buf # asm 1: movq <buf=int64#2,184(<input_0=int64#1) # asm 2: movq <buf=%rsi,184(<input_0=%rdi) movq % rsi, 184( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#1,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm0,>buf=%rsi pextrq $0x0, % xmm0, % rsi # qhasm: mem64[ input_0 + 248 ] = buf # asm 1: movq <buf=int64#2,248(<input_0=int64#1) # asm 2: movq <buf=%rsi,248(<input_0=%rdi) movq % rsi, 248( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 312 ] = buf # asm 1: movq <buf=int64#2,312(<input_0=int64#1) # asm 2: movq <buf=%rsi,312(<input_0=%rdi) movq % rsi, 312( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#3,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm2,>buf=%rsi pextrq $0x0, % xmm2, % rsi # qhasm: mem64[ input_0 + 376 ] = buf # asm 1: movq <buf=int64#2,376(<input_0=int64#1) # asm 2: movq <buf=%rsi,376(<input_0=%rdi) movq % rsi, 376( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#5,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm4,>buf=%rsi pextrq $0x0, % xmm4, % rsi # qhasm: mem64[ input_0 + 440 ] = buf # asm 1: movq <buf=int64#2,440(<input_0=int64#1) # asm 2: movq <buf=%rsi,440(<input_0=%rdi) movq % rsi, 440( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#2,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm1,>buf=%rsi pextrq $0x0, % xmm1, % rsi # qhasm: mem64[ input_0 + 504 ] = buf # asm 1: movq <buf=int64#2,504(<input_0=int64#1) # asm 2: movq <buf=%rsi,504(<input_0=%rdi) movq % rsi, 504( % rdi) # qhasm: mask0 aligned= mem128[ MASK2_0 ] # asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0 movdqa MASK2_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK2_1 ] # asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1 movdqa MASK2_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK1_0 ] # asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2 movdqa MASK1_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK1_1 ] # asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3 movdqa MASK1_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK0_0 ] # asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4 movdqa MASK0_0( % rip), % 
xmm4 # qhasm: mask5 aligned= mem128[ MASK0_1 ] # asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5 movdqa MASK0_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 8(<input_0=%rdi),>r1=%xmm7 movddup 8( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 16(<input_0=%rdi),>r2=%xmm8 movddup 16( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 24(<input_0=%rdi),>r3=%xmm9 movddup 24( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 32(<input_0=%rdi),>r4=%xmm10 movddup 32( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 40 ] x2 # asm 1: movddup 40(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 40(<input_0=%rdi),>r5=%xmm11 movddup 40( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 48 ] x2 # asm 1: movddup 48(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 48(<input_0=%rdi),>r6=%xmm12 movddup 48( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 56 ] x2 # asm 1: movddup 56(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 56(<input_0=%rdi),>r7=%xmm13 movddup 56( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | 
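# note: each 64-byte block is now loaded with movddup and pushed through
# the fine-grained exchange rounds: 4-bit (psllq/psrlq $4, masks MASK2_*),
# 2-bit (MASK1_*), and 1-bit (MASK0_*), before being stored interleaved.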
v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq 
$2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq 
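# note: same butterfly at shift distance 2; v00/v11 hold the bits that
# stay in place, v10/v01 carry the pair being exchanged between a register
# and its partner.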
$2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # 
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13, %xmm9, %xmm7

# qhasm: mem128[ input_0 + 0 ] = t0
# asm 1: movdqu <t0=reg128#8,0(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,0(<input_0=%rdi)
movdqu %xmm7, 0(%rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10, %xmm14, %xmm7

# qhasm: mem128[ input_0 + 16 ] = t0
# asm 1: movdqu <t0=reg128#8,16(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,16(<input_0=%rdi)
movdqu %xmm7, 16(%rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8, %xmm11, %xmm7

# qhasm: mem128[ input_0 + 32 ] = t0
# asm 1: movdqu <t0=reg128#8,32(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,32(<input_0=%rdi)
movdqu %xmm7, 32(%rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6, %xmm12, %xmm6

# qhasm: mem128[ input_0 + 48 ] = t0
# asm 1: movdqu <t0=reg128#7,48(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,48(<input_0=%rdi)
movdqu %xmm6, 48(%rdi)

# qhasm: r0 = mem64[ input_0 + 64 ] x2
# asm 1: movddup 64(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 64(<input_0=%rdi),>r0=%xmm6
movddup 64(%rdi), %xmm6

# qhasm: r1 = mem64[ input_0 + 72 ] x2
# asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7
movddup 72(%rdi), %xmm7

# qhasm: r2 = mem64[ input_0 + 80 ] x2
# asm 1: movddup 80(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 80(<input_0=%rdi),>r2=%xmm8
movddup 80(%rdi), %xmm8

# qhasm: r3 = mem64[ input_0 + 88 ] x2
# asm 1: movddup 88(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 88(<input_0=%rdi),>r3=%xmm9
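# note: for reference, a minimal scalar C sketch of the same masked
# shift/or butterfly network (kept inside this comment so the file still
# assembles). The function name, the row-major uint64_t[64] layout, and
# the exact bit/row convention are assumptions for illustration, not part
# of the generated qhasm output; the low masks are the standard interleave
# constants (cf. the MASK* tables in consts.S) and ~lo[d] gives the
# matching high mask.
#
#   #include <stdint.h>
#
#   /* Transpose a 64x64 bit matrix held as 64 uint64_t rows, in place. */
#   static void transpose_64x64_sketch(uint64_t m[64]) {
#       static const uint64_t lo[6] = {
#           0x5555555555555555ULL, 0x3333333333333333ULL,
#           0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL,
#           0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL };
#       for (int d = 5; d >= 0; d--) {
#           int s = 1 << d;              /* swap distance: 32,16,8,4,2,1 */
#           for (int i = 0; i < 64; i += 2 * s)
#               for (int j = i; j < i + s; j++) {
#                   uint64_t a = m[j], b = m[j + s];
#                   /* low half stays, partner's low half shifts in ... */
#                   m[j]     = (a & lo[d]) | ((b & lo[d]) << s);
#                   /* ... and the high halves swap the other way. */
#                   m[j + s] = ((a & ~lo[d]) >> s) | (b & ~lo[d]);
#               }
#       }
#   }
#
# The vector code above computes exactly this recurrence two 64-bit lanes
# at a time (v00/v10 build the first line, v01/v11 the second).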
movddup 88( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 96 ] x2 # asm 1: movddup 96(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 96(<input_0=%rdi),>r4=%xmm10 movddup 96( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 104 ] x2 # asm 1: movddup 104(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 104(<input_0=%rdi),>r5=%xmm11 movddup 104( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 112 ] x2 # asm 1: movddup 112(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 112(<input_0=%rdi),>r6=%xmm12 movddup 112( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 120 ] x2 # asm 1: movddup 120(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 120(<input_0=%rdi),>r7=%xmm13 movddup 120( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: 
vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 
psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq 
$1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 
1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 64 ] = t0 # asm 1: movdqu <t0=reg128#8,64(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,64(<input_0=%rdi) movdqu % xmm7, 64( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 80 ] = t0 # asm 1: movdqu <t0=reg128#8,80(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,80(<input_0=%rdi) movdqu % xmm7, 80( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 96 ] = t0 # asm 1: movdqu <t0=reg128#8,96(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,96(<input_0=%rdi) movdqu % xmm7, 96( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 112 ] = t0 # asm 1: movdqu <t0=reg128#7,112(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,112(<input_0=%rdi) movdqu % xmm6, 112( % rdi) # qhasm: r0 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 128(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 128(<input_0=%rdi),>r0=%xmm6 movddup 128( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 136(<input_0=%rdi),>r1=%xmm7 movddup 136( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 152(<input_0=%rdi),>r3=%xmm9 movddup 152( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 160(<input_0=%rdi),>r4=%xmm10 movddup 160( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 168 ] x2 # asm 1: movddup 168(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 168(<input_0=%rdi),>r5=%xmm11 movddup 168( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 176 ] x2 # asm 1: movddup 176(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 176(<input_0=%rdi),>r6=%xmm12 movddup 176( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 184 ] x2 # asm 1: movddup 184(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 184(<input_0=%rdi),>r7=%xmm13 movddup 184( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
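# note: the load / 4-2-1-bit exchange / vpunpcklqdq-store sequence above
# is fully unrolled; it repeats for successive 64-byte blocks of input_0
# (offsets 0, 64, 128, 192, ...).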
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & 
mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor 
<v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: 
vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor 
<v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 128 ] = t0 # asm 1: movdqu <t0=reg128#8,128(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,128(<input_0=%rdi) movdqu % xmm7, 128( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 144 ] = t0 # asm 1: movdqu <t0=reg128#8,144(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,144(<input_0=%rdi) movdqu % xmm7, 144( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 160 ] = t0 # asm 1: movdqu <t0=reg128#8,160(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,160(<input_0=%rdi) movdqu % xmm7, 160( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 176 ] = t0 # asm 1: movdqu <t0=reg128#7,176(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,176(<input_0=%rdi) movdqu % xmm6, 176( % rdi) # qhasm: r0 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 192(<input_0=%rdi),>r0=%xmm6 movddup 192( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 200(<input_0=%rdi),>r1=%xmm7 movddup 200( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 208(<input_0=%rdi),>r2=%xmm8 movddup 208( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 224(<input_0=%rdi),>r4=%xmm10 movddup 224( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 232 ] x2 # asm 1: movddup 232(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 232(<input_0=%rdi),>r5=%xmm11 movddup 232( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 240 ] x2 # asm 1: movddup 240(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 240(<input_0=%rdi),>r6=%xmm12 movddup 240( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 248 ] x2 # asm 1: movddup 248(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 248(<input_0=%rdi),>r7=%xmm13 movddup 248( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % 
xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand 
<mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & 
mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 192 ] = t0 # asm 1: movdqu <t0=reg128#8,192(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,192(<input_0=%rdi) movdqu % xmm7, 192( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 208 ] = t0 # asm 1: movdqu 
<t0=reg128#8,208(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,208(<input_0=%rdi) movdqu % xmm7, 208( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 224 ] = t0 # asm 1: movdqu <t0=reg128#8,224(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,224(<input_0=%rdi) movdqu % xmm7, 224( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 240 ] = t0 # asm 1: movdqu <t0=reg128#7,240(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,240(<input_0=%rdi) movdqu % xmm6, 240( % rdi) # qhasm: r0 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 256(<input_0=%rdi),>r0=%xmm6 movddup 256( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 264(<input_0=%rdi),>r1=%xmm7 movddup 264( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 272(<input_0=%rdi),>r2=%xmm8 movddup 272( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 280(<input_0=%rdi),>r3=%xmm9 movddup 280( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 296 ] x2 # asm 1: movddup 296(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 296(<input_0=%rdi),>r5=%xmm11 movddup 296( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 304 ] x2 # asm 1: movddup 304(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 304(<input_0=%rdi),>r6=%xmm12 movddup 304( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 312 ] x2 # asm 1: movddup 312(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 312(<input_0=%rdi),>r7=%xmm13 movddup 312( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 
v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand 
<mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand 
<mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 256 ] = t0 # asm 1: movdqu <t0=reg128#8,256(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,256(<input_0=%rdi) movdqu % xmm7, 256( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 272 ] = t0 # asm 1: movdqu <t0=reg128#8,272(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,272(<input_0=%rdi) movdqu % xmm7, 272( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 288 ] = t0 # asm 1: movdqu <t0=reg128#8,288(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,288(<input_0=%rdi) movdqu % xmm7, 288( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 304 ] = t0 
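# ---------------------------------------------------------------------------
# Note: the store annotated above completes one full round of this routine.
# Each round broadcast-loads eight 64-bit words with movddup, swaps bit
# fields between word pairs at distances 4, 2 and 1 (mask pairs mask0/mask1,
# mask2/mask3 and mask4/mask5, with psllq/psrlq counts 4, 2 and 1), then
# packs the low quadwords with vpunpcklqdq and stores them back. This
# matches the usual masked butterfly schedule of a bit-matrix transpose;
# the same round repeats below for the next 64 bytes of the buffer.
# ---------------------------------------------------------------------------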
# asm 1: movdqu <t0=reg128#7,304(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,304(<input_0=%rdi)
movdqu % xmm6, 304( % rdi)

# qhasm: r0 = mem64[ input_0 + 320 ] x2
# asm 1: movddup 320(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 320(<input_0=%rdi),>r0=%xmm6
movddup 320( % rdi), % xmm6

# qhasm: r1 = mem64[ input_0 + 328 ] x2
# asm 1: movddup 328(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 328(<input_0=%rdi),>r1=%xmm7
movddup 328( % rdi), % xmm7

# qhasm: r2 = mem64[ input_0 + 336 ] x2
# asm 1: movddup 336(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 336(<input_0=%rdi),>r2=%xmm8
movddup 336( % rdi), % xmm8

# qhasm: r3 = mem64[ input_0 + 344 ] x2
# asm 1: movddup 344(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 344(<input_0=%rdi),>r3=%xmm9
movddup 344( % rdi), % xmm9

# qhasm: r4 = mem64[ input_0 + 352 ] x2
# asm 1: movddup 352(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 352(<input_0=%rdi),>r4=%xmm10
movddup 352( % rdi), % xmm10

# qhasm: r5 = mem64[ input_0 + 360 ] x2
# asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11
movddup 360( % rdi), % xmm11

# qhasm: r6 = mem64[ input_0 + 368 ] x2
# asm 1: movddup 368(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 368(<input_0=%rdi),>r6=%xmm12
movddup 368( % rdi), % xmm12

# qhasm: r7 = mem64[ input_0 + 376 ] x2
# asm 1: movddup 376(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 376(<input_0=%rdi),>r7=%xmm13
movddup 376( % rdi), % xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand % xmm0, % xmm10, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand % xmm1, % xmm6, % xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, % xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand % xmm0, % xmm11, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand % xmm1, % xmm7, % xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, % xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand % xmm0, % xmm12, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand % xmm1, % xmm8, % xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, % xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand % xmm0, % xmm13, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand % xmm1, % xmm9, % xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, % xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand % xmm2, % xmm11, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand % xmm3, % xmm14, % xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
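# ---------------------------------------------------------------------------
# For reference, one masked swap stage of the kind generated above can be
# written in C as the sketch below. Hedged: the helper name delta_swap and
# its parameter names are illustrative only, and the masks are assumed to
# select alternating bit fields with hi_mask == lo_mask << shift, as the
# shift counts 4/2/1 suggest.
#
#     #include <stdint.h>
#
#     /* Exchange the hi_mask field of *a with the lo_mask field of *b. */
#     static void delta_swap(uint64_t *a, uint64_t *b,
#                            uint64_t lo_mask, uint64_t hi_mask,
#                            unsigned shift)
#     {
#         uint64_t v00 = *a & lo_mask;            /* bits of a that stay */
#         uint64_t v10 = (*b & lo_mask) << shift; /* bits moving a <- b  */
#         uint64_t v01 = (*a & hi_mask) >> shift; /* bits moving b <- a  */
#         uint64_t v11 = *b & hi_mask;            /* bits of b that stay */
#         *a = v00 | v10;                         /* r_i = v00 | v10     */
#         *b = v01 | v11;                         /* r_j = v01 | v11     */
#     }
#
# Applying this at distances 4, 2 and 1 across r0..r7, as the code here
# does, transposes the 8x8 bit tile formed by corresponding bytes of the
# eight words (an inference from the mask/shift pattern; the qhasm source
# carries no higher-level comment).
# ---------------------------------------------------------------------------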
psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # 
asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 320 ] = t0 # asm 1: movdqu <t0=reg128#8,320(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,320(<input_0=%rdi) movdqu % xmm7, 320( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 336 ] = t0 # asm 1: movdqu <t0=reg128#8,336(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,336(<input_0=%rdi) movdqu % xmm7, 336( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 352 ] = t0 # asm 1: movdqu <t0=reg128#8,352(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,352(<input_0=%rdi) movdqu % xmm7, 352( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 368 ] = t0 # asm 1: movdqu <t0=reg128#7,368(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,368(<input_0=%rdi) movdqu % xmm6, 368( % rdi) # qhasm: r0 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 384(<input_0=%rdi),>r0=%xmm6 movddup 384( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 392(<input_0=%rdi),>r1=%xmm7 movddup 392( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 400(<input_0=%rdi),>r2=%xmm8 movddup 400( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 
408(<input_0=%rdi),>r3=%xmm9 movddup 408( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 416(<input_0=%rdi),>r4=%xmm10 movddup 416( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 424 ] x2 # asm 1: movddup 424(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 424(<input_0=%rdi),>r5=%xmm11 movddup 424( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 432 ] x2 # asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12 movddup 432( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 440 ] x2 # asm 1: movddup 440(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 440(<input_0=%rdi),>r7=%xmm13 movddup 440( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # 
qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # 
asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq 
$1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % 
xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 384 ] = t0 # asm 1: movdqu <t0=reg128#8,384(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,384(<input_0=%rdi) movdqu % xmm7, 384( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 400 ] = t0 # asm 1: movdqu <t0=reg128#8,400(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,400(<input_0=%rdi) movdqu % xmm7, 400( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 416 ] = t0 # asm 1: movdqu <t0=reg128#8,416(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,416(<input_0=%rdi) movdqu % xmm7, 416( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 432 ] = t0 # asm 1: movdqu <t0=reg128#7,432(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,432(<input_0=%rdi) movdqu % xmm6, 432( % rdi) # qhasm: r0 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 448(<input_0=%rdi),>r0=%xmm6 movddup 448( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 456(<input_0=%rdi),>r1=%xmm7 movddup 456( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 464(<input_0=%rdi),>r2=%xmm8 movddup 464( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 472(<input_0=%rdi),>r3=%xmm9 movddup 472( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 480(<input_0=%rdi),>r4=%xmm10 movddup 480( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 488 ] x2 # asm 1: movddup 488(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 488(<input_0=%rdi),>r5=%xmm11 movddup 488( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 496 ] x2 # asm 1: movddup 496(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 496(<input_0=%rdi),>r6=%xmm12 movddup 496( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 504 ] x2 # asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13 movddup 504( % rdi), % xmm13 # 
qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % 
xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>r3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>r3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor 
<v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<r7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>r5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>r5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<r1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<r0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % 
xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<r3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<r5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<r4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<r7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>r6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>r6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % 
xmm1 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#8,<r0=reg128#4,>t0=reg128#4 # asm 2: vpunpcklqdq <r1=%xmm7,<r0=%xmm3,>t0=%xmm3 vpunpcklqdq % xmm7, % xmm3, % xmm3 # qhasm: mem128[ input_0 + 448 ] = t0 # asm 1: movdqu <t0=reg128#4,448(<input_0=int64#1) # asm 2: movdqu <t0=%xmm3,448(<input_0=%rdi) movdqu % xmm3, 448( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#1,<r2=reg128#9,>t0=reg128#1 # asm 2: vpunpcklqdq <r3=%xmm0,<r2=%xmm8,>t0=%xmm0 vpunpcklqdq % xmm0, % xmm8, % xmm0 # qhasm: mem128[ input_0 + 464 ] = t0 # asm 1: movdqu <t0=reg128#1,464(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,464(<input_0=%rdi) movdqu % xmm0, 464( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#3,<r4=reg128#10,>t0=reg128#1 # asm 2: vpunpcklqdq <r5=%xmm2,<r4=%xmm9,>t0=%xmm0 vpunpcklqdq % xmm2, % xmm9, % xmm0 # qhasm: mem128[ input_0 + 480 ] = t0 # asm 1: movdqu <t0=reg128#1,480(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,480(<input_0=%rdi) movdqu % xmm0, 480( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#2,<r6=reg128#5,>t0=reg128#1 # asm 2: vpunpcklqdq <r7=%xmm1,<r6=%xmm4,>t0=%xmm0 vpunpcklqdq % xmm1, % xmm4, % xmm0 # qhasm: mem128[ input_0 + 496 ] = t0 # asm 1: movdqu <t0=reg128#1,496(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,496(<input_0=%rdi) movdqu % xmm0, 496( % rdi) # qhasm: return add % r11, % rsp ret
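The routine above repeats one primitive throughout: a masked exchange between a register pair, built from vpand with a complementary mask pair, a psllq/psrlq by 4, 2, or 1, and two vpor merges. Each pass keeps the low bit groups of one register, pulls the low groups of its partner up by the group half-width, and symmetrically pushes the high groups down; that is one layer of a bitwise transpose network. Below is a minimal scalar C sketch of that step on plain 64-bit words (exchange_step is a hypothetical helper named for illustration, not a function from this repository):

#include <stdint.h>

/* One mask/shift/OR exchange step, as in the AVX2 code above but on
 * plain 64-bit words. `mask` selects the low s bits of every 2s-bit
 * group (e.g. 0x0F0F...0F with s = 4); its complement plays the role
 * of the second mask register in the assembly. Afterwards *x holds
 * the low halves of both inputs and *y the high halves. */
static void exchange_step(uint64_t *x, uint64_t *y, uint64_t mask, int s)
{
    uint64_t v00 = *x & mask;          /* low groups of x, kept in place */
    uint64_t v10 = (*y & mask) << s;   /* low groups of y, moved up      */
    uint64_t v01 = (*x & ~mask) >> s;  /* high groups of x, moved down   */
    uint64_t v11 = *y & ~mask;         /* high groups of y, kept in place*/
    *x = v00 | v10;
    *y = v01 | v11;
}

A call such as exchange_step(&r0, &r4, 0x0F0F0F0F0F0F0F0FULL, 4) mirrors the first r0/r4 pass above; the assembly runs the same step on xmm registers, so each instruction sequence exchanges two 64-bit lanes at once and the mask pair is loaded once and reused across every register pair.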
mktmansour/MKT-KSA-Geolocation-Security
2,712
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119/avx2/consts.S
#include "namespace.h" #if defined(__APPLE__) #define ASM_HIDDEN .private_extern #else #define ASM_HIDDEN .hidden #endif #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) .data ASM_HIDDEN MASK0_0 ASM_HIDDEN MASK0_1 ASM_HIDDEN MASK1_0 ASM_HIDDEN MASK1_1 ASM_HIDDEN MASK2_0 ASM_HIDDEN MASK2_1 ASM_HIDDEN MASK3_0 ASM_HIDDEN MASK3_1 ASM_HIDDEN MASK4_0 ASM_HIDDEN MASK4_1 ASM_HIDDEN MASK5_0 ASM_HIDDEN MASK5_1 .globl MASK0_0 .globl MASK0_1 .globl MASK1_0 .globl MASK1_1 .globl MASK2_0 .globl MASK2_1 .globl MASK3_0 .globl MASK3_1 .globl MASK4_0 .globl MASK4_1 .globl MASK5_0 .globl MASK5_1 .p2align 5 MASK0_0: .quad 0x5555555555555555, 0x5555555555555555, 0x5555555555555555, 0x5555555555555555 MASK0_1: .quad 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA MASK1_0: .quad 0x3333333333333333, 0x3333333333333333, 0x3333333333333333, 0x3333333333333333 MASK1_1: .quad 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC MASK2_0: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F MASK2_1: .quad 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0 MASK3_0: .quad 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF MASK3_1: .quad 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00 MASK4_0: .quad 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF MASK4_1: .quad 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000 MASK5_0: .quad 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF MASK5_1: .quad 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000
mktmansour/MKT-KSA-Geolocation-Security
14,915
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119/avx2/update_asm.S
#include "namespace.h" #define update_asm CRYPTO_NAMESPACE(update_asm) #define _update_asm _CRYPTO_NAMESPACE(update_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 s0 # qhasm: int64 s1 # qhasm: int64 s2 # qhasm: enter update_asm .p2align 5 .global _update_asm .global update_asm _update_asm: update_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: s2 = input_1 # asm 1: mov <input_1=int64#2,>s2=int64#2 # asm 2: mov <input_1=%rsi,>s2=%rsi mov % rsi, % rsi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ 
input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 
0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd 
$1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq 
<s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: return add % r11, % rsp ret
mktmansour/MKT-KSA-Geolocation-Security
53,565
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119/avx2/vec128_mul_asm.S
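The file below multiplies bitsliced GF(2^13) elements: bit i of many field elements is stored in its own 128-bit vector, so a field multiplication begins as a schoolbook polynomial product over GF(2), with vpand forming coefficient products and vpxor accumulating them into 25 partials r0..r24 (the walk from b12 down to b0 in the code), followed by reduction modulo the field polynomial. A minimal scalar sketch of the schoolbook phase, reduction omitted, is given here; bitsliced_mul and its parameter names are illustrative, not the repository's API:

#include <stdint.h>

#define GFBITS 13

/* Scalar sketch of the bitsliced schoolbook multiply that the AVX2
 * code performs 256 bits at a time: a[i] holds bit i of many field
 * elements in parallel, so coefficient products become AND and
 * coefficient sums become XOR. The reduction of r[13..24] modulo the
 * field polynomial is omitted; the assembly performs it after filling
 * the partial-product buffer. */
static void bitsliced_mul(uint64_t r[2 * GFBITS - 1],
                          const uint64_t a[GFBITS],
                          const uint64_t b[GFBITS])
{
    for (int i = 0; i < 2 * GFBITS - 1; i++)
        r[i] = 0;
    for (int i = 0; i < GFBITS; i++)
        for (int j = 0; j < GFBITS; j++)
            r[i + j] ^= a[i] & b[j];   /* vpand + vpxor in the asm */
}

Judging by the vinsertf128 offsets, the assembly additionally packs a[0..6] into the low 128-bit lanes and a[7..12] into the high lanes of seven ymm registers, roughly halving the number of AND/XOR passes; the scalar sketch ignores that lane-packing trick.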
#include "namespace.h" #define vec128_mul_asm CRYPTO_NAMESPACE(vec128_mul_asm) #define _vec128_mul_asm _CRYPTO_NAMESPACE(vec128_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 b6 # qhasm: reg256 b7 # qhasm: reg256 b8 # qhasm: reg256 b9 # qhasm: reg256 b10 # qhasm: reg256 b11 # qhasm: reg256 b12 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: reg128 h0 # qhasm: reg128 h1 # qhasm: reg128 h2 # qhasm: reg128 h3 # qhasm: reg128 h4 # qhasm: reg128 h5 # qhasm: reg128 h6 # qhasm: reg128 h7 # qhasm: reg128 h8 # qhasm: reg128 h9 # qhasm: reg128 h10 # qhasm: reg128 h11 # qhasm: reg128 h12 # qhasm: reg128 h13 # qhasm: reg128 h14 # qhasm: reg128 h15 # qhasm: reg128 h16 # qhasm: reg128 h17 # qhasm: reg128 h18 # qhasm: reg128 h19 # qhasm: reg128 h20 # qhasm: reg128 h21 # qhasm: reg128 h22 # qhasm: reg128 h23 # qhasm: reg128 h24 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: enter vec128_mul_asm .p2align 5 .global _vec128_mul_asm .global vec128_mul_asm _vec128_mul_asm: vec128_mul_asm: mov % rsp, % r11 and $31, % r11 add $608, % r11 sub % r11, % rsp # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#5 # asm 2: leaq <buf=0(%rsp),>ptr=%r8 leaq 0( % rsp), % r8 # qhasm: tmp = input_3 # asm 1: mov <input_3=int64#4,>tmp=int64#6 # asm 2: mov <input_3=%rcx,>tmp=%r9 mov % rcx, % r9 # qhasm: tmp *= 12 # asm 1: imulq $12,<tmp=int64#6,>tmp=int64#6 # asm 2: imulq $12,<tmp=%r9,>tmp=%r9 imulq $12, % r9, % r9 # qhasm: input_2 += tmp # asm 1: add <tmp=int64#6,<input_2=int64#3 # asm 2: add <tmp=%r9,<input_2=%rdx add % r9, % rdx # qhasm: b12 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b12=reg256#1 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b12=%ymm0 vbroadcasti128 0( % rdx), % ymm0 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: a6 = a6 ^ a6 # asm 1: vpxor <a6=reg256#2,<a6=reg256#2,>a6=reg256#2 # asm 2: vpxor <a6=%ymm1,<a6=%ymm1,>a6=%ymm1 vpxor % ymm1, % ymm1, % ymm1 # qhasm: a6[0] = mem128[ input_1 + 96 ] # asm 1: vinsertf128 $0x0,96(<input_1=int64#2),<a6=reg256#2,<a6=reg256#2 # asm 2: vinsertf128 $0x0,96(<input_1=%rsi),<a6=%ymm1,<a6=%ymm1 vinsertf128 $0x0, 96( % rsi), % ymm1, % ymm1 # qhasm: r18 = b12 & a6 # asm 1: vpand <b12=reg256#1,<a6=reg256#2,>r18=reg256#3 # asm 2: vpand <b12=%ymm0,<a6=%ymm1,>r18=%ymm2 vpand % ymm0, % ymm1, % ymm2 # qhasm: mem256[ ptr + 576 ] = r18 # asm 1: vmovupd <r18=reg256#3,576(<ptr=int64#5) # asm 2: 
vmovupd <r18=%ymm2,576(<ptr=%r8) vmovupd % ymm2, 576( % r8) # qhasm: a5[0] = mem128[ input_1 + 80 ] # asm 1: vinsertf128 $0x0,80(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x0,80(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x0, 80( % rsi), % ymm2, % ymm2 # qhasm: a5[1] = mem128[ input_1 + 192 ] # asm 1: vinsertf128 $0x1,192(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x1,192(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x1, 192( % rsi), % ymm2, % ymm2 # qhasm: r17 = b12 & a5 # asm 1: vpand <b12=reg256#1,<a5=reg256#3,>r17=reg256#4 # asm 2: vpand <b12=%ymm0,<a5=%ymm2,>r17=%ymm3 vpand % ymm0, % ymm2, % ymm3 # qhasm: a4[0] = mem128[ input_1 + 64 ] # asm 1: vinsertf128 $0x0,64(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x0,64(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x0, 64( % rsi), % ymm4, % ymm4 # qhasm: a4[1] = mem128[ input_1 + 176 ] # asm 1: vinsertf128 $0x1,176(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x1,176(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x1, 176( % rsi), % ymm4, % ymm4 # qhasm: r16 = b12 & a4 # asm 1: vpand <b12=reg256#1,<a4=reg256#5,>r16=reg256#6 # asm 2: vpand <b12=%ymm0,<a4=%ymm4,>r16=%ymm5 vpand % ymm0, % ymm4, % ymm5 # qhasm: a3[0] = mem128[ input_1 + 48 ] # asm 1: vinsertf128 $0x0,48(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x0,48(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x0, 48( % rsi), % ymm6, % ymm6 # qhasm: a3[1] = mem128[ input_1 + 160 ] # asm 1: vinsertf128 $0x1,160(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x1,160(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x1, 160( % rsi), % ymm6, % ymm6 # qhasm: r15 = b12 & a3 # asm 1: vpand <b12=reg256#1,<a3=reg256#7,>r15=reg256#8 # asm 2: vpand <b12=%ymm0,<a3=%ymm6,>r15=%ymm7 vpand % ymm0, % ymm6, % ymm7 # qhasm: a2[0] = mem128[ input_1 + 32 ] # asm 1: vinsertf128 $0x0,32(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x0,32(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x0, 32( % rsi), % ymm8, % ymm8 # qhasm: a2[1] = mem128[ input_1 + 144 ] # asm 1: vinsertf128 $0x1,144(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x1,144(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x1, 144( % rsi), % ymm8, % ymm8 # qhasm: r14 = b12 & a2 # asm 1: vpand <b12=reg256#1,<a2=reg256#9,>r14=reg256#10 # asm 2: vpand <b12=%ymm0,<a2=%ymm8,>r14=%ymm9 vpand % ymm0, % ymm8, % ymm9 # qhasm: a1[0] = mem128[ input_1 + 16 ] # asm 1: vinsertf128 $0x0,16(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x0,16(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x0, 16( % rsi), % ymm10, % ymm10 # qhasm: a1[1] = mem128[ input_1 + 128 ] # asm 1: vinsertf128 $0x1,128(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x1,128(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x1, 128( % rsi), % ymm10, % ymm10 # qhasm: r13 = b12 & a1 # asm 1: vpand <b12=reg256#1,<a1=reg256#11,>r13=reg256#12 # asm 2: vpand <b12=%ymm0,<a1=%ymm10,>r13=%ymm11 vpand % ymm0, % ymm10, % ymm11 # qhasm: a0[0] = mem128[ input_1 + 0 ] # asm 1: vinsertf128 $0x0,0(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x0,0(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x0, 0( % rsi), % ymm12, % ymm12 # qhasm: a0[1] = mem128[ input_1 + 112 ] # asm 1: vinsertf128 $0x1,112(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x1,112(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x1, 112( % rsi), % ymm12, % ymm12 
# qhasm: r12 = b12 & a0 # asm 1: vpand <b12=reg256#1,<a0=reg256#13,>r12=reg256#1 # asm 2: vpand <b12=%ymm0,<a0=%ymm12,>r12=%ymm0 vpand % ymm0, % ymm12, % ymm0 # qhasm: b11 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b11=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b11=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b11 & a6 # asm 1: vpand <b11=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b11=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#4,<r17=reg256#4 # asm 2: vpxor <r=%ymm14,<r17=%ymm3,<r17=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 544 ] = r17 # asm 1: vmovupd <r17=reg256#4,544(<ptr=int64#5) # asm 2: vmovupd <r17=%ymm3,544(<ptr=%r8) vmovupd % ymm3, 544( % r8) # qhasm: r = b11 & a5 # asm 1: vpand <b11=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#4,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm3,<r16=%ymm5,<r16=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b11 & a4 # asm 1: vpand <b11=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#4,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm3,<r15=%ymm7,<r15=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b11 & a3 # asm 1: vpand <b11=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#4,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm3,<r14=%ymm9,<r14=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b11 & a2 # asm 1: vpand <b11=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#4,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm3,<r13=%ymm11,<r13=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b11 & a1 # asm 1: vpand <b11=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#4,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm3,<r12=%ymm0,<r12=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r11 = b11 & a0 # asm 1: vpand <b11=reg256#14,<a0=reg256#13,>r11=reg256#4 # asm 2: vpand <b11=%ymm13,<a0=%ymm12,>r11=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b10 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b10=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b10=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b10 & a6 # asm 1: vpand <b10=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b10=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm14,<r16=%ymm5,<r16=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 512 ] = r16 # asm 1: vmovupd <r16=reg256#6,512(<ptr=int64#5) # asm 2: vmovupd <r16=%ymm5,512(<ptr=%r8) vmovupd % ymm5, 512( % r8) # qhasm: r = b10 & a5 # asm 1: vpand <b10=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # 
qhasm: r15 ^= r # asm 1: vpxor <r=reg256#6,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm5,<r15=%ymm7,<r15=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b10 & a4 # asm 1: vpand <b10=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#6,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm5,<r14=%ymm9,<r14=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b10 & a3 # asm 1: vpand <b10=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#6,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm5,<r13=%ymm11,<r13=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b10 & a2 # asm 1: vpand <b10=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#6,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm5,<r12=%ymm0,<r12=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b10 & a1 # asm 1: vpand <b10=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#6,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm5,<r11=%ymm3,<r11=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r10 = b10 & a0 # asm 1: vpand <b10=reg256#14,<a0=reg256#13,>r10=reg256#6 # asm 2: vpand <b10=%ymm13,<a0=%ymm12,>r10=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b9 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b9=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b9=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b9 & a6 # asm 1: vpand <b9=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b9=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm14,<r15=%ymm7,<r15=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 480 ] = r15 # asm 1: vmovupd <r15=reg256#8,480(<ptr=int64#5) # asm 2: vmovupd <r15=%ymm7,480(<ptr=%r8) vmovupd % ymm7, 480( % r8) # qhasm: r = b9 & a5 # asm 1: vpand <b9=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#8,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm7,<r14=%ymm9,<r14=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b9 & a4 # asm 1: vpand <b9=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#8,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm7,<r13=%ymm11,<r13=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b9 & a3 # asm 1: vpand <b9=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#8,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm7,<r12=%ymm0,<r12=%ymm0 vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b9 & a2 # asm 1: vpand <b9=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#8,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm7,<r11=%ymm3,<r11=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b9 & a1 # asm 1: vpand 
<b9=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#8,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm7,<r10=%ymm5,<r10=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r9 = b9 & a0 # asm 1: vpand <b9=reg256#14,<a0=reg256#13,>r9=reg256#8 # asm 2: vpand <b9=%ymm13,<a0=%ymm12,>r9=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b8 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b8=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b8=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b8 & a6 # asm 1: vpand <b8=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b8=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm14,<r14=%ymm9,<r14=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 448 ] = r14 # asm 1: vmovupd <r14=reg256#10,448(<ptr=int64#5) # asm 2: vmovupd <r14=%ymm9,448(<ptr=%r8) vmovupd % ymm9, 448( % r8) # qhasm: r = b8 & a5 # asm 1: vpand <b8=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#10,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm9,<r13=%ymm11,<r13=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b8 & a4 # asm 1: vpand <b8=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#10,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm9,<r12=%ymm0,<r12=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b8 & a3 # asm 1: vpand <b8=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#10,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm9,<r11=%ymm3,<r11=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b8 & a2 # asm 1: vpand <b8=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#10,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm9,<r10=%ymm5,<r10=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b8 & a1 # asm 1: vpand <b8=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#10,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm9,<r9=%ymm7,<r9=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r8 = b8 & a0 # asm 1: vpand <b8=reg256#14,<a0=reg256#13,>r8=reg256#10 # asm 2: vpand <b8=%ymm13,<a0=%ymm12,>r8=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b7 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b7=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b7=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b7 & a6 # asm 1: vpand <b7=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b7=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm14,<r13=%ymm11,<r13=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 416 ] = r13 # asm 1: vmovupd 
<r13=reg256#12,416(<ptr=int64#5) # asm 2: vmovupd <r13=%ymm11,416(<ptr=%r8) vmovupd % ymm11, 416( % r8) # qhasm: r = b7 & a5 # asm 1: vpand <b7=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#12,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm11,<r12=%ymm0,<r12=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b7 & a4 # asm 1: vpand <b7=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#12,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm11,<r11=%ymm3,<r11=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b7 & a3 # asm 1: vpand <b7=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#12,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm11,<r10=%ymm5,<r10=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b7 & a2 # asm 1: vpand <b7=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#12,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm11,<r9=%ymm7,<r9=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b7 & a1 # asm 1: vpand <b7=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#12,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm11,<r8=%ymm9,<r8=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r7 = b7 & a0 # asm 1: vpand <b7=reg256#14,<a0=reg256#13,>r7=reg256#12 # asm 2: vpand <b7=%ymm13,<a0=%ymm12,>r7=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b6 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b6=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b6=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b6 & a6 # asm 1: vpand <b6=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b6=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm14,<r12=%ymm0,<r12=%ymm0 vpxor % ymm14, % ymm0, % ymm0 # qhasm: mem256[ ptr + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<ptr=int64#5) # asm 2: vmovupd <r12=%ymm0,384(<ptr=%r8) vmovupd % ymm0, 384( % r8) # qhasm: r = b6 & a5 # asm 1: vpand <b6=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm0,<r11=%ymm3,<r11=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b6 & a4 # asm 1: vpand <b6=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm0,<r10=%ymm5,<r10=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b6 & a3 # asm 1: vpand <b6=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a3=%ymm6,>r=%ymm0 vpand % ymm13, % ymm6, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm0,<r9=%ymm7,<r9=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b6 & a2 # asm 1: vpand <b6=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand 
<b6=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm0,<r8=%ymm9,<r8=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b6 & a1 # asm 1: vpand <b6=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm0,<r7=%ymm11,<r7=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r6 = b6 & a0 # asm 1: vpand <b6=reg256#14,<a0=reg256#13,>r6=reg256#1 # asm 2: vpand <b6=%ymm13,<a0=%ymm12,>r6=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: b5 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b5=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b5=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b5 & a6 # asm 1: vpand <b5=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b5=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm14,<r11=%ymm3,<r11=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 352 ] = r11 # asm 1: vmovupd <r11=reg256#4,352(<ptr=int64#5) # asm 2: vmovupd <r11=%ymm3,352(<ptr=%r8) vmovupd % ymm3, 352( % r8) # qhasm: r = b5 & a5 # asm 1: vpand <b5=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#4,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm3,<r10=%ymm5,<r10=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b5 & a4 # asm 1: vpand <b5=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#4,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm3,<r9=%ymm7,<r9=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b5 & a3 # asm 1: vpand <b5=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#4,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm3,<r8=%ymm9,<r8=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b5 & a2 # asm 1: vpand <b5=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#4,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm3,<r7=%ymm11,<r7=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b5 & a1 # asm 1: vpand <b5=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#4,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm3,<r6=%ymm0,<r6=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r5 = b5 & a0 # asm 1: vpand <b5=reg256#14,<a0=reg256#13,>r5=reg256#4 # asm 2: vpand <b5=%ymm13,<a0=%ymm12,>r5=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b4 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b4=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b4=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b4 & a6 # asm 1: vpand <b4=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b4=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: 
r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm14,<r10=%ymm5,<r10=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#6,320(<ptr=int64#5) # asm 2: vmovupd <r10=%ymm5,320(<ptr=%r8) vmovupd % ymm5, 320( % r8) # qhasm: r = b4 & a5 # asm 1: vpand <b4=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#6,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm5,<r9=%ymm7,<r9=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b4 & a4 # asm 1: vpand <b4=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#6,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm5,<r8=%ymm9,<r8=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b4 & a3 # asm 1: vpand <b4=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#6,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm5,<r7=%ymm11,<r7=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b4 & a2 # asm 1: vpand <b4=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#6,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm5,<r6=%ymm0,<r6=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b4 & a1 # asm 1: vpand <b4=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#6,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm5,<r5=%ymm3,<r5=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r4 = b4 & a0 # asm 1: vpand <b4=reg256#14,<a0=reg256#13,>r4=reg256#6 # asm 2: vpand <b4=%ymm13,<a0=%ymm12,>r4=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b3 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b3=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b3=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b3 & a6 # asm 1: vpand <b3=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b3=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm14,<r9=%ymm7,<r9=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#8,288(<ptr=int64#5) # asm 2: vmovupd <r9=%ymm7,288(<ptr=%r8) vmovupd % ymm7, 288( % r8) # qhasm: r = b3 & a5 # asm 1: vpand <b3=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#8,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm7,<r8=%ymm9,<r8=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b3 & a4 # asm 1: vpand <b3=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#8,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm7,<r7=%ymm11,<r7=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b3 & a3 # asm 1: vpand <b3=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#8,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm7,<r6=%ymm0,<r6=%ymm0 
vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b3 & a2 # asm 1: vpand <b3=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#8,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm7,<r5=%ymm3,<r5=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b3 & a1 # asm 1: vpand <b3=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#8,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm7,<r4=%ymm5,<r4=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r3 = b3 & a0 # asm 1: vpand <b3=reg256#14,<a0=reg256#13,>r3=reg256#8 # asm 2: vpand <b3=%ymm13,<a0=%ymm12,>r3=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b2 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b2=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b2=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b2 & a6 # asm 1: vpand <b2=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b2=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm14,<r8=%ymm9,<r8=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#10,256(<ptr=int64#5) # asm 2: vmovupd <r8=%ymm9,256(<ptr=%r8) vmovupd % ymm9, 256( % r8) # qhasm: r = b2 & a5 # asm 1: vpand <b2=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#10,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm9,<r7=%ymm11,<r7=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b2 & a4 # asm 1: vpand <b2=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#10,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm9,<r6=%ymm0,<r6=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b2 & a3 # asm 1: vpand <b2=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#10,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm9,<r5=%ymm3,<r5=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b2 & a2 # asm 1: vpand <b2=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#10,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm9,<r4=%ymm5,<r4=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b2 & a1 # asm 1: vpand <b2=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#10,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm9,<r3=%ymm7,<r3=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r2 = b2 & a0 # asm 1: vpand <b2=reg256#14,<a0=reg256#13,>r2=reg256#10 # asm 2: vpand <b2=%ymm13,<a0=%ymm12,>r2=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b1 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b1=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b1=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b1 & a6 # asm 1: vpand 
<b1=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b1=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm14,<r7=%ymm11,<r7=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#12,224(<ptr=int64#5) # asm 2: vmovupd <r7=%ymm11,224(<ptr=%r8) vmovupd % ymm11, 224( % r8) # qhasm: r = b1 & a5 # asm 1: vpand <b1=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#12,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm11,<r6=%ymm0,<r6=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b1 & a4 # asm 1: vpand <b1=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#12,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm11,<r5=%ymm3,<r5=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b1 & a3 # asm 1: vpand <b1=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#12,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm11,<r4=%ymm5,<r4=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b1 & a2 # asm 1: vpand <b1=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#12,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm11,<r3=%ymm7,<r3=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b1 & a1 # asm 1: vpand <b1=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#12,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm11,<r2=%ymm9,<r2=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r1 = b1 & a0 # asm 1: vpand <b1=reg256#14,<a0=reg256#13,>r1=reg256#12 # asm 2: vpand <b1=%ymm13,<a0=%ymm12,>r1=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b0 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b0=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b0=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b0 & a6 # asm 1: vpand <b0=reg256#14,<a6=reg256#2,>r=reg256#2 # asm 2: vpand <b0=%ymm13,<a6=%ymm1,>r=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#2,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm1,<r6=%ymm0,<r6=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<ptr=int64#5) # asm 2: vmovupd <r6=%ymm0,192(<ptr=%r8) vmovupd % ymm0, 192( % r8) # qhasm: r = b0 & a5 # asm 1: vpand <b0=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm0,<r5=%ymm3,<r5=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b0 & a4 # asm 1: vpand <b0=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm0,<r4=%ymm5,<r4=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b0 & a3 # asm 1: vpand <b0=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a3=%ymm6,>r=%ymm0 vpand 
% ymm13, % ymm6, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm0,<r3=%ymm7,<r3=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b0 & a2 # asm 1: vpand <b0=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm0,<r2=%ymm9,<r2=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b0 & a1 # asm 1: vpand <b0=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#12,<r1=reg256#12 # asm 2: vpxor <r=%ymm0,<r1=%ymm11,<r1=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r0 = b0 & a0 # asm 1: vpand <b0=reg256#14,<a0=reg256#13,>r0=reg256#1 # asm 2: vpand <b0=%ymm13,<a0=%ymm12,>r0=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#5) # asm 2: vmovupd <r5=%ymm3,160(<ptr=%r8) vmovupd % ymm3, 160( % r8) # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#6,128(<ptr=int64#5) # asm 2: vmovupd <r4=%ymm5,128(<ptr=%r8) vmovupd % ymm5, 128( % r8) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#8,96(<ptr=int64#5) # asm 2: vmovupd <r3=%ymm7,96(<ptr=%r8) vmovupd % ymm7, 96( % r8) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#10,64(<ptr=int64#5) # asm 2: vmovupd <r2=%ymm9,64(<ptr=%r8) vmovupd % ymm9, 64( % r8) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#12,32(<ptr=int64#5) # asm 2: vmovupd <r1=%ymm11,32(<ptr=%r8) vmovupd % ymm11, 32( % r8) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#5) # asm 2: vmovupd <r0=%ymm0,0(<ptr=%r8) vmovupd % ymm0, 0( % r8) # qhasm: vzeroupper vzeroupper # qhasm: h24 = mem128[ ptr + 560 ] # asm 1: movdqu 560(<ptr=int64#5),>h24=reg128#1 # asm 2: movdqu 560(<ptr=%r8),>h24=%xmm0 movdqu 560( % r8), % xmm0 # qhasm: h11 = h24 # asm 1: movdqa <h24=reg128#1,>h11=reg128#2 # asm 2: movdqa <h24=%xmm0,>h11=%xmm1 movdqa % xmm0, % xmm1 # qhasm: h12 = h24 # asm 1: movdqa <h24=reg128#1,>h12=reg128#3 # asm 2: movdqa <h24=%xmm0,>h12=%xmm2 movdqa % xmm0, % xmm2 # qhasm: h14 = h24 # asm 1: movdqa <h24=reg128#1,>h14=reg128#4 # asm 2: movdqa <h24=%xmm0,>h14=%xmm3 movdqa % xmm0, % xmm3 # qhasm: h15 = h24 # asm 1: movdqa <h24=reg128#1,>h15=reg128#1 # asm 2: movdqa <h24=%xmm0,>h15=%xmm0 movdqa % xmm0, % xmm0 # qhasm: h23 = mem128[ ptr + 528 ] # asm 1: movdqu 528(<ptr=int64#5),>h23=reg128#5 # asm 2: movdqu 528(<ptr=%r8),>h23=%xmm4 movdqu 528( % r8), % xmm4 # qhasm: h10 = h23 # asm 1: movdqa <h23=reg128#5,>h10=reg128#6 # asm 2: movdqa <h23=%xmm4,>h10=%xmm5 movdqa % xmm4, % xmm5 # qhasm: h11 = h11 ^ h23 # asm 1: vpxor <h23=reg128#5,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h23=%xmm4,<h11=%xmm1,>h11=%xmm1 vpxor % xmm4, % xmm1, % xmm1 # qhasm: h13 = h23 # asm 1: movdqa <h23=reg128#5,>h13=reg128#7 # asm 2: movdqa <h23=%xmm4,>h13=%xmm6 movdqa % xmm4, % xmm6 # qhasm: h14 = h14 ^ h23 # asm 1: vpxor <h23=reg128#5,<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor <h23=%xmm4,<h14=%xmm3,>h14=%xmm3 vpxor % xmm4, % xmm3, % xmm3 # qhasm: h22 = mem128[ ptr + 496 ] # asm 1: movdqu 496(<ptr=int64#5),>h22=reg128#5 # asm 2: movdqu 496(<ptr=%r8),>h22=%xmm4 movdqu 496( % r8), % xmm4 # qhasm: h9 = h22 # asm 1: movdqa <h22=reg128#5,>h9=reg128#8 # asm 2: movdqa <h22=%xmm4,>h9=%xmm7 movdqa % xmm4, % xmm7 # qhasm: h10 = h10 ^ h22 # asm 1: vpxor <h22=reg128#5,<h10=reg128#6,>h10=reg128#6 # asm 2: 
vpxor <h22=%xmm4,<h10=%xmm5,>h10=%xmm5 vpxor % xmm4, % xmm5, % xmm5 # qhasm: h12 = h12 ^ h22 # asm 1: vpxor <h22=reg128#5,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h22=%xmm4,<h12=%xmm2,>h12=%xmm2 vpxor % xmm4, % xmm2, % xmm2 # qhasm: h13 = h13 ^ h22 # asm 1: vpxor <h22=reg128#5,<h13=reg128#7,>h13=reg128#5 # asm 2: vpxor <h22=%xmm4,<h13=%xmm6,>h13=%xmm4 vpxor % xmm4, % xmm6, % xmm4 # qhasm: h21 = mem128[ ptr + 464 ] # asm 1: movdqu 464(<ptr=int64#5),>h21=reg128#7 # asm 2: movdqu 464(<ptr=%r8),>h21=%xmm6 movdqu 464( % r8), % xmm6 # qhasm: h8 = h21 # asm 1: movdqa <h21=reg128#7,>h8=reg128#9 # asm 2: movdqa <h21=%xmm6,>h8=%xmm8 movdqa % xmm6, % xmm8 # qhasm: h9 = h9 ^ h21 # asm 1: vpxor <h21=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h21=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h11 = h11 ^ h21 # asm 1: vpxor <h21=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h21=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h12 = h12 ^ h21 # asm 1: vpxor <h21=reg128#7,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h21=%xmm6,<h12=%xmm2,>h12=%xmm2 vpxor % xmm6, % xmm2, % xmm2 # qhasm: h20 = mem128[ ptr + 432 ] # asm 1: movdqu 432(<ptr=int64#5),>h20=reg128#7 # asm 2: movdqu 432(<ptr=%r8),>h20=%xmm6 movdqu 432( % r8), % xmm6 # qhasm: h7 = h20 # asm 1: movdqa <h20=reg128#7,>h7=reg128#10 # asm 2: movdqa <h20=%xmm6,>h7=%xmm9 movdqa % xmm6, % xmm9 # qhasm: h8 = h8 ^ h20 # asm 1: vpxor <h20=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h20=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h10 = h10 ^ h20 # asm 1: vpxor <h20=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h20=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h11 = h11 ^ h20 # asm 1: vpxor <h20=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h20=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h19 = mem128[ ptr + 400 ] # asm 1: movdqu 400(<ptr=int64#5),>h19=reg128#7 # asm 2: movdqu 400(<ptr=%r8),>h19=%xmm6 movdqu 400( % r8), % xmm6 # qhasm: h6 = h19 # asm 1: movdqa <h19=reg128#7,>h6=reg128#11 # asm 2: movdqa <h19=%xmm6,>h6=%xmm10 movdqa % xmm6, % xmm10 # qhasm: h7 = h7 ^ h19 # asm 1: vpxor <h19=reg128#7,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h19=%xmm6,<h7=%xmm9,>h7=%xmm9 vpxor % xmm6, % xmm9, % xmm9 # qhasm: h9 = h9 ^ h19 # asm 1: vpxor <h19=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h19=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h10 = h10 ^ h19 # asm 1: vpxor <h19=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h19=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h18 = mem128[ ptr + 368 ] # asm 1: movdqu 368(<ptr=int64#5),>h18=reg128#7 # asm 2: movdqu 368(<ptr=%r8),>h18=%xmm6 movdqu 368( % r8), % xmm6 # qhasm: h18 = h18 ^ mem128[ ptr + 576 ] # asm 1: vpxor 576(<ptr=int64#5),<h18=reg128#7,>h18=reg128#7 # asm 2: vpxor 576(<ptr=%r8),<h18=%xmm6,>h18=%xmm6 vpxor 576( % r8), % xmm6, % xmm6 # qhasm: h5 = h18 # asm 1: movdqa <h18=reg128#7,>h5=reg128#12 # asm 2: movdqa <h18=%xmm6,>h5=%xmm11 movdqa % xmm6, % xmm11 # qhasm: h6 = h6 ^ h18 # asm 1: vpxor <h18=reg128#7,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h18=%xmm6,<h6=%xmm10,>h6=%xmm10 vpxor % xmm6, % xmm10, % xmm10 # qhasm: h8 = h8 ^ h18 # asm 1: vpxor <h18=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h18=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h9 = h9 ^ h18 # asm 1: vpxor <h18=reg128#7,<h9=reg128#8,>h9=reg128#7 # asm 2: vpxor <h18=%xmm6,<h9=%xmm7,>h9=%xmm6 vpxor % xmm6, % xmm7, % xmm6 # qhasm: h17 = 
mem128[ ptr + 336 ] # asm 1: movdqu 336(<ptr=int64#5),>h17=reg128#8 # asm 2: movdqu 336(<ptr=%r8),>h17=%xmm7 movdqu 336( % r8), % xmm7 # qhasm: h17 = h17 ^ mem128[ ptr + 544 ] # asm 1: vpxor 544(<ptr=int64#5),<h17=reg128#8,>h17=reg128#8 # asm 2: vpxor 544(<ptr=%r8),<h17=%xmm7,>h17=%xmm7 vpxor 544( % r8), % xmm7, % xmm7 # qhasm: h4 = h17 # asm 1: movdqa <h17=reg128#8,>h4=reg128#13 # asm 2: movdqa <h17=%xmm7,>h4=%xmm12 movdqa % xmm7, % xmm12 # qhasm: h5 = h5 ^ h17 # asm 1: vpxor <h17=reg128#8,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h17=%xmm7,<h5=%xmm11,>h5=%xmm11 vpxor % xmm7, % xmm11, % xmm11 # qhasm: h7 = h7 ^ h17 # asm 1: vpxor <h17=reg128#8,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h17=%xmm7,<h7=%xmm9,>h7=%xmm9 vpxor % xmm7, % xmm9, % xmm9 # qhasm: h8 = h8 ^ h17 # asm 1: vpxor <h17=reg128#8,<h8=reg128#9,>h8=reg128#8 # asm 2: vpxor <h17=%xmm7,<h8=%xmm8,>h8=%xmm7 vpxor % xmm7, % xmm8, % xmm7 # qhasm: h16 = mem128[ ptr + 304 ] # asm 1: movdqu 304(<ptr=int64#5),>h16=reg128#9 # asm 2: movdqu 304(<ptr=%r8),>h16=%xmm8 movdqu 304( % r8), % xmm8 # qhasm: h16 = h16 ^ mem128[ ptr + 512 ] # asm 1: vpxor 512(<ptr=int64#5),<h16=reg128#9,>h16=reg128#9 # asm 2: vpxor 512(<ptr=%r8),<h16=%xmm8,>h16=%xmm8 vpxor 512( % r8), % xmm8, % xmm8 # qhasm: h3 = h16 # asm 1: movdqa <h16=reg128#9,>h3=reg128#14 # asm 2: movdqa <h16=%xmm8,>h3=%xmm13 movdqa % xmm8, % xmm13 # qhasm: h4 = h4 ^ h16 # asm 1: vpxor <h16=reg128#9,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor <h16=%xmm8,<h4=%xmm12,>h4=%xmm12 vpxor % xmm8, % xmm12, % xmm12 # qhasm: h6 = h6 ^ h16 # asm 1: vpxor <h16=reg128#9,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h16=%xmm8,<h6=%xmm10,>h6=%xmm10 vpxor % xmm8, % xmm10, % xmm10 # qhasm: h7 = h7 ^ h16 # asm 1: vpxor <h16=reg128#9,<h7=reg128#10,>h7=reg128#9 # asm 2: vpxor <h16=%xmm8,<h7=%xmm9,>h7=%xmm8 vpxor % xmm8, % xmm9, % xmm8 # qhasm: h15 = h15 ^ mem128[ ptr + 272 ] # asm 1: vpxor 272(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 272(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 272( % r8), % xmm0, % xmm0 # qhasm: h15 = h15 ^ mem128[ ptr + 480 ] # asm 1: vpxor 480(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 480(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 480( % r8), % xmm0, % xmm0 # qhasm: h2 = h15 # asm 1: movdqa <h15=reg128#1,>h2=reg128#10 # asm 2: movdqa <h15=%xmm0,>h2=%xmm9 movdqa % xmm0, % xmm9 # qhasm: h3 = h3 ^ h15 # asm 1: vpxor <h15=reg128#1,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h15=%xmm0,<h3=%xmm13,>h3=%xmm13 vpxor % xmm0, % xmm13, % xmm13 # qhasm: h5 = h5 ^ h15 # asm 1: vpxor <h15=reg128#1,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h15=%xmm0,<h5=%xmm11,>h5=%xmm11 vpxor % xmm0, % xmm11, % xmm11 # qhasm: h6 = h6 ^ h15 # asm 1: vpxor <h15=reg128#1,<h6=reg128#11,>h6=reg128#1 # asm 2: vpxor <h15=%xmm0,<h6=%xmm10,>h6=%xmm0 vpxor % xmm0, % xmm10, % xmm0 # qhasm: h14 = h14 ^ mem128[ ptr + 240 ] # asm 1: vpxor 240(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 240(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 240( % r8), % xmm3, % xmm3 # qhasm: h14 = h14 ^ mem128[ ptr + 448 ] # asm 1: vpxor 448(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 448(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 448( % r8), % xmm3, % xmm3 # qhasm: h1 = h14 # asm 1: movdqa <h14=reg128#4,>h1=reg128#11 # asm 2: movdqa <h14=%xmm3,>h1=%xmm10 movdqa % xmm3, % xmm10 # qhasm: h2 = h2 ^ h14 # asm 1: vpxor <h14=reg128#4,<h2=reg128#10,>h2=reg128#10 # asm 2: vpxor <h14=%xmm3,<h2=%xmm9,>h2=%xmm9 vpxor % xmm3, % xmm9, % xmm9 # qhasm: h4 = h4 ^ h14 # asm 1: vpxor <h14=reg128#4,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor 
<h14=%xmm3,<h4=%xmm12,>h4=%xmm12 vpxor % xmm3, % xmm12, % xmm12 # qhasm: h5 = h5 ^ h14 # asm 1: vpxor <h14=reg128#4,<h5=reg128#12,>h5=reg128#4 # asm 2: vpxor <h14=%xmm3,<h5=%xmm11,>h5=%xmm3 vpxor % xmm3, % xmm11, % xmm3 # qhasm: h13 = h13 ^ mem128[ ptr + 208 ] # asm 1: vpxor 208(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 208(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 208( % r8), % xmm4, % xmm4 # qhasm: h13 = h13 ^ mem128[ ptr + 416 ] # asm 1: vpxor 416(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 416(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 416( % r8), % xmm4, % xmm4 # qhasm: h0 = h13 # asm 1: movdqa <h13=reg128#5,>h0=reg128#12 # asm 2: movdqa <h13=%xmm4,>h0=%xmm11 movdqa % xmm4, % xmm11 # qhasm: h1 = h1 ^ h13 # asm 1: vpxor <h13=reg128#5,<h1=reg128#11,>h1=reg128#11 # asm 2: vpxor <h13=%xmm4,<h1=%xmm10,>h1=%xmm10 vpxor % xmm4, % xmm10, % xmm10 # qhasm: h3 = h3 ^ h13 # asm 1: vpxor <h13=reg128#5,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h13=%xmm4,<h3=%xmm13,>h3=%xmm13 vpxor % xmm4, % xmm13, % xmm13 # qhasm: h4 = h4 ^ h13 # asm 1: vpxor <h13=reg128#5,<h4=reg128#13,>h4=reg128#5 # asm 2: vpxor <h13=%xmm4,<h4=%xmm12,>h4=%xmm4 vpxor % xmm4, % xmm12, % xmm4 # qhasm: h12 = h12 ^ mem128[ ptr + 384 ] # asm 1: vpxor 384(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 384(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 384( % r8), % xmm2, % xmm2 # qhasm: h12 = h12 ^ mem128[ ptr + 176 ] # asm 1: vpxor 176(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 176(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 176( % r8), % xmm2, % xmm2 # qhasm: mem128[ input_0 + 192 ] = h12 # asm 1: movdqu <h12=reg128#3,192(<input_0=int64#1) # asm 2: movdqu <h12=%xmm2,192(<input_0=%rdi) movdqu % xmm2, 192( % rdi) # qhasm: h11 = h11 ^ mem128[ ptr + 352 ] # asm 1: vpxor 352(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 352(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 352( % r8), % xmm1, % xmm1 # qhasm: h11 = h11 ^ mem128[ ptr + 144 ] # asm 1: vpxor 144(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 144(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 144( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 176 ] = h11 # asm 1: movdqu <h11=reg128#2,176(<input_0=int64#1) # asm 2: movdqu <h11=%xmm1,176(<input_0=%rdi) movdqu % xmm1, 176( % rdi) # qhasm: h10 = h10 ^ mem128[ ptr + 320 ] # asm 1: vpxor 320(<ptr=int64#5),<h10=reg128#6,>h10=reg128#2 # asm 2: vpxor 320(<ptr=%r8),<h10=%xmm5,>h10=%xmm1 vpxor 320( % r8), % xmm5, % xmm1 # qhasm: h10 = h10 ^ mem128[ ptr + 112 ] # asm 1: vpxor 112(<ptr=int64#5),<h10=reg128#2,>h10=reg128#2 # asm 2: vpxor 112(<ptr=%r8),<h10=%xmm1,>h10=%xmm1 vpxor 112( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 160 ] = h10 # asm 1: movdqu <h10=reg128#2,160(<input_0=int64#1) # asm 2: movdqu <h10=%xmm1,160(<input_0=%rdi) movdqu % xmm1, 160( % rdi) # qhasm: h9 = h9 ^ mem128[ ptr + 288 ] # asm 1: vpxor 288(<ptr=int64#5),<h9=reg128#7,>h9=reg128#2 # asm 2: vpxor 288(<ptr=%r8),<h9=%xmm6,>h9=%xmm1 vpxor 288( % r8), % xmm6, % xmm1 # qhasm: h9 = h9 ^ mem128[ ptr + 80 ] # asm 1: vpxor 80(<ptr=int64#5),<h9=reg128#2,>h9=reg128#2 # asm 2: vpxor 80(<ptr=%r8),<h9=%xmm1,>h9=%xmm1 vpxor 80( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 144 ] = h9 # asm 1: movdqu <h9=reg128#2,144(<input_0=int64#1) # asm 2: movdqu <h9=%xmm1,144(<input_0=%rdi) movdqu % xmm1, 144( % rdi) # qhasm: h8 = h8 ^ mem128[ ptr + 256 ] # asm 1: vpxor 256(<ptr=int64#5),<h8=reg128#8,>h8=reg128#2 # asm 2: vpxor 256(<ptr=%r8),<h8=%xmm7,>h8=%xmm1 vpxor 256( % r8), % xmm7, % xmm1 # qhasm: h8 = h8 ^ mem128[ ptr + 48 ] # asm 1: vpxor 
48(<ptr=int64#5),<h8=reg128#2,>h8=reg128#2 # asm 2: vpxor 48(<ptr=%r8),<h8=%xmm1,>h8=%xmm1 vpxor 48( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 128 ] = h8 # asm 1: movdqu <h8=reg128#2,128(<input_0=int64#1) # asm 2: movdqu <h8=%xmm1,128(<input_0=%rdi) movdqu % xmm1, 128( % rdi) # qhasm: h7 = h7 ^ mem128[ ptr + 224 ] # asm 1: vpxor 224(<ptr=int64#5),<h7=reg128#9,>h7=reg128#2 # asm 2: vpxor 224(<ptr=%r8),<h7=%xmm8,>h7=%xmm1 vpxor 224( % r8), % xmm8, % xmm1 # qhasm: h7 = h7 ^ mem128[ ptr + 16 ] # asm 1: vpxor 16(<ptr=int64#5),<h7=reg128#2,>h7=reg128#2 # asm 2: vpxor 16(<ptr=%r8),<h7=%xmm1,>h7=%xmm1 vpxor 16( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 112 ] = h7 # asm 1: movdqu <h7=reg128#2,112(<input_0=int64#1) # asm 2: movdqu <h7=%xmm1,112(<input_0=%rdi) movdqu % xmm1, 112( % rdi) # qhasm: h6 = h6 ^ mem128[ ptr + 192 ] # asm 1: vpxor 192(<ptr=int64#5),<h6=reg128#1,>h6=reg128#1 # asm 2: vpxor 192(<ptr=%r8),<h6=%xmm0,>h6=%xmm0 vpxor 192( % r8), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 96 ] = h6 # asm 1: movdqu <h6=reg128#1,96(<input_0=int64#1) # asm 2: movdqu <h6=%xmm0,96(<input_0=%rdi) movdqu % xmm0, 96( % rdi) # qhasm: h5 = h5 ^ mem128[ ptr + 160 ] # asm 1: vpxor 160(<ptr=int64#5),<h5=reg128#4,>h5=reg128#1 # asm 2: vpxor 160(<ptr=%r8),<h5=%xmm3,>h5=%xmm0 vpxor 160( % r8), % xmm3, % xmm0 # qhasm: mem128[ input_0 + 80 ] = h5 # asm 1: movdqu <h5=reg128#1,80(<input_0=int64#1) # asm 2: movdqu <h5=%xmm0,80(<input_0=%rdi) movdqu % xmm0, 80( % rdi) # qhasm: h4 = h4 ^ mem128[ ptr + 128 ] # asm 1: vpxor 128(<ptr=int64#5),<h4=reg128#5,>h4=reg128#1 # asm 2: vpxor 128(<ptr=%r8),<h4=%xmm4,>h4=%xmm0 vpxor 128( % r8), % xmm4, % xmm0 # qhasm: mem128[ input_0 + 64 ] = h4 # asm 1: movdqu <h4=reg128#1,64(<input_0=int64#1) # asm 2: movdqu <h4=%xmm0,64(<input_0=%rdi) movdqu % xmm0, 64( % rdi) # qhasm: h3 = h3 ^ mem128[ ptr + 96 ] # asm 1: vpxor 96(<ptr=int64#5),<h3=reg128#14,>h3=reg128#1 # asm 2: vpxor 96(<ptr=%r8),<h3=%xmm13,>h3=%xmm0 vpxor 96( % r8), % xmm13, % xmm0 # qhasm: mem128[ input_0 + 48 ] = h3 # asm 1: movdqu <h3=reg128#1,48(<input_0=int64#1) # asm 2: movdqu <h3=%xmm0,48(<input_0=%rdi) movdqu % xmm0, 48( % rdi) # qhasm: h2 = h2 ^ mem128[ ptr + 64 ] # asm 1: vpxor 64(<ptr=int64#5),<h2=reg128#10,>h2=reg128#1 # asm 2: vpxor 64(<ptr=%r8),<h2=%xmm9,>h2=%xmm0 vpxor 64( % r8), % xmm9, % xmm0 # qhasm: mem128[ input_0 + 32 ] = h2 # asm 1: movdqu <h2=reg128#1,32(<input_0=int64#1) # asm 2: movdqu <h2=%xmm0,32(<input_0=%rdi) movdqu % xmm0, 32( % rdi) # qhasm: h1 = h1 ^ mem128[ ptr + 32 ] # asm 1: vpxor 32(<ptr=int64#5),<h1=reg128#11,>h1=reg128#1 # asm 2: vpxor 32(<ptr=%r8),<h1=%xmm10,>h1=%xmm0 vpxor 32( % r8), % xmm10, % xmm0 # qhasm: mem128[ input_0 + 16 ] = h1 # asm 1: movdqu <h1=reg128#1,16(<input_0=int64#1) # asm 2: movdqu <h1=%xmm0,16(<input_0=%rdi) movdqu % xmm0, 16( % rdi) # qhasm: h0 = h0 ^ mem128[ ptr + 0 ] # asm 1: vpxor 0(<ptr=int64#5),<h0=reg128#12,>h0=reg128#1 # asm 2: vpxor 0(<ptr=%r8),<h0=%xmm11,>h0=%xmm0 vpxor 0( % r8), % xmm11, % xmm0 # qhasm: mem128[ input_0 + 0 ] = h0 # asm 1: movdqu <h0=reg128#1,0(<input_0=int64#1) # asm 2: movdqu <h0=%xmm0,0(<input_0=%rdi) movdqu % xmm0, 0( % rdi) # qhasm: return add % r11, % rsp ret
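# Note: the movdqu/vpxor sequence above appears to implement the reduction
# step of a vec128 multiplication over GF(2^13). The 128-bit loads and
# memory XORs over ptr + 0..576 recombine the two lanes of the stacked
# 256-bit partial products into the 25 coefficient limbs h0..h24 of the full
# GF(2)[x] product; each high limb h_j (j >= 13) is then folded into
# h_(j-13), h_(j-12), h_(j-10) and h_(j-9), i.e. reduction modulo the field
# polynomial x^13 + x^4 + x^3 + x + 1, and the 13 reduced limbs are stored
# back to input_0 + 0..192.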
mktmansour/MKT-KSA-Geolocation-Security
11,545
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119/avx2/vec_reduce_asm.S
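# Note: this routine appears to reduce a 13-limb vec128 bit-sliced element
# to a single 13-bit word: for each of the 13 128-bit limbs at
# input_0 + 0..200 it XORs the two 64-bit halves, takes the parity via
# popcnt / and $1, and shifts the bit into rax. The limb at offset 192 is
# processed first, so it lands in bit 12 and the limb at offset 0 in bit 0
# of the returned value.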
#include "namespace.h" #define vec_reduce_asm CRYPTO_NAMESPACE(vec_reduce_asm) #define _vec_reduce_asm _CRYPTO_NAMESPACE(vec_reduce_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 t0 # qhasm: int64 t1 # qhasm: int64 c # qhasm: int64 r # qhasm: enter vec_reduce_asm .p2align 5 .global _vec_reduce_asm .global vec_reduce_asm _vec_reduce_asm: vec_reduce_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: r = 0 # asm 1: mov $0,>r=int64#7 # asm 2: mov $0,>r=%rax mov $0, % rax # qhasm: t0 = mem64[ input_0 + 192 ] # asm 1: movq 192(<input_0=int64#1),>t0=int64#2 # asm 2: movq 192(<input_0=%rdi),>t0=%rsi movq 192( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 200 ] # asm 1: movq 200(<input_0=int64#1),>t1=int64#3 # asm 2: movq 200(<input_0=%rdi),>t1=%rdx movq 200( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 176 ] # asm 1: movq 176(<input_0=int64#1),>t0=int64#2 # asm 2: movq 176(<input_0=%rdi),>t0=%rsi movq 176( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 184 ] # asm 1: movq 184(<input_0=int64#1),>t1=int64#3 # asm 2: movq 184(<input_0=%rdi),>t1=%rdx movq 184( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 160 ] # asm 1: movq 160(<input_0=int64#1),>t0=int64#2 # asm 2: movq 160(<input_0=%rdi),>t0=%rsi movq 160( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 168 ] # asm 1: movq 168(<input_0=int64#1),>t1=int64#3 # asm 2: movq 168(<input_0=%rdi),>t1=%rdx movq 168( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 144 ] # asm 1: movq 144(<input_0=int64#1),>t0=int64#2 # asm 2: movq 144(<input_0=%rdi),>t0=%rsi movq 144( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 152 ] # asm 1: movq 152(<input_0=int64#1),>t1=int64#3 # asm 2: movq 152(<input_0=%rdi),>t1=%rdx movq 152( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor 
<t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 128 ] # asm 1: movq 128(<input_0=int64#1),>t0=int64#2 # asm 2: movq 128(<input_0=%rdi),>t0=%rsi movq 128( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 136 ] # asm 1: movq 136(<input_0=int64#1),>t1=int64#3 # asm 2: movq 136(<input_0=%rdi),>t1=%rdx movq 136( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 112 ] # asm 1: movq 112(<input_0=int64#1),>t0=int64#2 # asm 2: movq 112(<input_0=%rdi),>t0=%rsi movq 112( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 120 ] # asm 1: movq 120(<input_0=int64#1),>t1=int64#3 # asm 2: movq 120(<input_0=%rdi),>t1=%rdx movq 120( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 96 ] # asm 1: movq 96(<input_0=int64#1),>t0=int64#2 # asm 2: movq 96(<input_0=%rdi),>t0=%rsi movq 96( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 104 ] # asm 1: movq 104(<input_0=int64#1),>t1=int64#3 # asm 2: movq 104(<input_0=%rdi),>t1=%rdx movq 104( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 80 ] # asm 1: movq 80(<input_0=int64#1),>t0=int64#2 # asm 2: movq 80(<input_0=%rdi),>t0=%rsi movq 80( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 88 ] # asm 1: movq 88(<input_0=int64#1),>t1=int64#3 # asm 2: movq 88(<input_0=%rdi),>t1=%rdx movq 88( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or 
<c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 64 ] # asm 1: movq 64(<input_0=int64#1),>t0=int64#2 # asm 2: movq 64(<input_0=%rdi),>t0=%rsi movq 64( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 72 ] # asm 1: movq 72(<input_0=int64#1),>t1=int64#3 # asm 2: movq 72(<input_0=%rdi),>t1=%rdx movq 72( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 48 ] # asm 1: movq 48(<input_0=int64#1),>t0=int64#2 # asm 2: movq 48(<input_0=%rdi),>t0=%rsi movq 48( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 56 ] # asm 1: movq 56(<input_0=int64#1),>t1=int64#3 # asm 2: movq 56(<input_0=%rdi),>t1=%rdx movq 56( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 32 ] # asm 1: movq 32(<input_0=int64#1),>t0=int64#2 # asm 2: movq 32(<input_0=%rdi),>t0=%rsi movq 32( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 40 ] # asm 1: movq 40(<input_0=int64#1),>t1=int64#3 # asm 2: movq 40(<input_0=%rdi),>t1=%rdx movq 40( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 16 ] # asm 1: movq 16(<input_0=int64#1),>t0=int64#2 # asm 2: movq 16(<input_0=%rdi),>t0=%rsi movq 16( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 24 ] # asm 1: movq 24(<input_0=int64#1),>t1=int64#3 # asm 2: movq 24(<input_0=%rdi),>t1=%rdx movq 24( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>t0=int64#2 # asm 2: movq 0(<input_0=%rdi),>t0=%rsi movq 0( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>t1=int64#1 # asm 2: movq 8(<input_0=%rdi),>t1=%rdi movq 8( % rdi), % rdi # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#1,<t0=int64#2 # asm 2: xor 
<t1=%rdi,<t0=%rsi xor % rdi, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#1 # asm 2: popcnt <t0=%rsi, >c=%rdi popcnt % rsi, % rdi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#1d # asm 2: and $1,<c=%edi and $1, % edi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#1,<r=int64#7 # asm 2: or <c=%rdi,<r=%rax or % rdi, % rax # qhasm: return r add % r11, % rsp ret
mktmansour/MKT-KSA-Geolocation-Security
24,351
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119/avx2/syndrome_asm.S
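# Note: this routine appears to compute a McEliece syndrome over GF(2).
# The inner1 loop first realigns the error vector: starting 193 bytes into
# input_2 it shifts 677 bytes right by 3 bits so that each 677-byte matrix
# row and the error vector line up on a byte boundary. The main loop then
# walks the 1547 rows of the public key backwards (input_1 starts
# 1047319 = 1547 * 677 bytes past the first row), ANDs each row with the
# error vector in 256-bit chunks and XOR-accumulates into ss, whose parity
# gives that row's syndrome bit.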
#include "namespace.h" #define syndrome_asm CRYPTO_NAMESPACE(syndrome_asm) #define _syndrome_asm _CRYPTO_NAMESPACE(syndrome_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 b64 # qhasm: int64 synd # qhasm: int64 addr # qhasm: int64 c # qhasm: int64 c_all # qhasm: int64 row # qhasm: reg256 pp # qhasm: reg256 ee # qhasm: reg256 ss # qhasm: int64 b0 # qhasm: int64 b1 # qhasm: int64 i # qhasm: int64 p # qhasm: int64 e # qhasm: int64 s # qhasm: int64 tmp # qhasm: stack64 back # qhasm: int64 buf_ptr # qhasm: stack256 buf # qhasm: enter syndrome_asm .p2align 5 .global _syndrome_asm .global syndrome_asm _syndrome_asm: syndrome_asm: mov % rsp, % r11 and $31, % r11 add $64, % r11 sub % r11, % rsp # qhasm: input_2 += 193 # asm 1: add $193,<input_2=int64#3 # asm 2: add $193,<input_2=%rdx add $193, % rdx # qhasm: *(uint8 *) (input_0 + 193) = 0 # asm 1: movb $0,193(<input_0=int64#1) # asm 2: movb $0,193(<input_0=%rdi) movb $0, 193( % rdi) # qhasm: tmp = *(uint8 *) (input_2 + 0) # asm 1: movzbq 0(<input_2=int64#3),>tmp=int64#4 # asm 2: movzbq 0(<input_2=%rdx),>tmp=%rcx movzbq 0( % rdx), % rcx # qhasm: back = tmp # asm 1: movq <tmp=int64#4,>back=stack64#1 # asm 2: movq <tmp=%rcx,>back=32(%rsp) movq % rcx, 32( % rsp) # qhasm: i = 0 # asm 1: mov $0,>i=int64#4 # asm 2: mov $0,>i=%rcx mov $0, % rcx # qhasm: inner1: ._inner1: # qhasm: addr = input_2 + i # asm 1: lea (<input_2=int64#3,<i=int64#4),>addr=int64#5 # asm 2: lea (<input_2=%rdx,<i=%rcx),>addr=%r8 lea ( % rdx, % rcx), % r8 # qhasm: b0 = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#5),>b0=int64#6 # asm 2: movzbq 0(<addr=%r8),>b0=%r9 movzbq 0( % r8), % r9 # qhasm: b1 = *(uint8 *) (addr + 1) # asm 1: movzbq 1(<addr=int64#5),>b1=int64#7 # asm 2: movzbq 1(<addr=%r8),>b1=%rax movzbq 1( % r8), % rax # qhasm: (uint64) b0 >>= 3 # asm 1: shr $3,<b0=int64#6 # asm 2: shr $3,<b0=%r9 shr $3, % r9 # qhasm: b1 <<= 5 # asm 1: shl $5,<b1=int64#7 # asm 2: shl $5,<b1=%rax shl $5, % rax # qhasm: b0 |= b1 # asm 1: or <b1=int64#7,<b0=int64#6 # asm 2: or <b1=%rax,<b0=%r9 or % rax, % r9 # qhasm: *(uint8 *) (addr + 0) = b0 # asm 1: movb <b0=int64#6b,0(<addr=int64#5) # asm 2: movb <b0=%r9b,0(<addr=%r8) movb % r9b, 0( % r8) # qhasm: i += 1 # asm 1: add $1,<i=int64#4 # asm 2: add $1,<i=%rcx add $1, % rcx # qhasm: =? 
i-676 # asm 1: cmp $676,<i=int64#4 # asm 2: cmp $676,<i=%rcx cmp $676, % rcx # comment:fp stack unchanged by jump # qhasm: goto inner1 if != jne ._inner1 # qhasm: b0 = *(uint8 *) (addr + 1) # asm 1: movzbq 1(<addr=int64#5),>b0=int64#4 # asm 2: movzbq 1(<addr=%r8),>b0=%rcx movzbq 1( % r8), % rcx # qhasm: (uint64) b0 >>= 3 # asm 1: shr $3,<b0=int64#4 # asm 2: shr $3,<b0=%rcx shr $3, % rcx # qhasm: *(uint8 *) (addr + 1) = b0 # asm 1: movb <b0=int64#4b,1(<addr=int64#5) # asm 2: movb <b0=%cl,1(<addr=%r8) movb % cl, 1( % r8) # qhasm: input_1 += 1047319 # asm 1: add $1047319,<input_1=int64#2 # asm 2: add $1047319,<input_1=%rsi add $1047319, % rsi # qhasm: buf_ptr = &buf # asm 1: leaq <buf=stack256#1,>buf_ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>buf_ptr=%rcx leaq 0( % rsp), % rcx # qhasm: row = 1547 # asm 1: mov $1547,>row=int64#5 # asm 2: mov $1547,>row=%r8 mov $1547, % r8 # qhasm: loop: ._loop: # qhasm: row -= 1 # asm 1: sub $1,<row=int64#5 # asm 2: sub $1,<row=%r8 sub $1, % r8 # qhasm: input_1 -= 677 # asm 1: sub $677,<input_1=int64#2 # asm 2: sub $677,<input_1=%rsi sub $677, % rsi # qhasm: ss = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>ss=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>ss=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: ee = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 0(<input_2=%rdx),>ee=%ymm1 vmovupd 0( % rdx), % ymm1 # qhasm: ss &= ee # asm 1: vpand <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpand <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpand % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 32(<input_1=%rsi),>pp=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 32(<input_2=%rdx),>ee=%ymm2 vmovupd 32( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 64(<input_1=%rsi),>pp=%ymm1 vmovupd 64( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 64(<input_2=%rdx),>ee=%ymm2 vmovupd 64( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 96(<input_1=%rsi),>pp=%ymm1 vmovupd 96( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 96 ] # asm 1: vmovupd 96(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 96(<input_2=%rdx),>ee=%ymm2 vmovupd 96( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 128(<input_1=%rsi),>pp=%ymm1 vmovupd 128( % rsi), % ymm1 # 
qhasm: ee = mem256[ input_2 + 128 ] # asm 1: vmovupd 128(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 128(<input_2=%rdx),>ee=%ymm2 vmovupd 128( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 160(<input_1=%rsi),>pp=%ymm1 vmovupd 160( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 160 ] # asm 1: vmovupd 160(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 160(<input_2=%rdx),>ee=%ymm2 vmovupd 160( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 192(<input_1=%rsi),>pp=%ymm1 vmovupd 192( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 192 ] # asm 1: vmovupd 192(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 192(<input_2=%rdx),>ee=%ymm2 vmovupd 192( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 224(<input_1=%rsi),>pp=%ymm1 vmovupd 224( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 224 ] # asm 1: vmovupd 224(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 224(<input_2=%rdx),>ee=%ymm2 vmovupd 224( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 256(<input_1=%rsi),>pp=%ymm1 vmovupd 256( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 256 ] # asm 1: vmovupd 256(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 256(<input_2=%rdx),>ee=%ymm2 vmovupd 256( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 288(<input_1=%rsi),>pp=%ymm1 vmovupd 288( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 288 ] # asm 1: vmovupd 288(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 288(<input_2=%rdx),>ee=%ymm2 vmovupd 288( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 
# qhasm: pp = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 320(<input_1=%rsi),>pp=%ymm1 vmovupd 320( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 320 ] # asm 1: vmovupd 320(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 320(<input_2=%rdx),>ee=%ymm2 vmovupd 320( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 352(<input_1=%rsi),>pp=%ymm1 vmovupd 352( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 352 ] # asm 1: vmovupd 352(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 352(<input_2=%rdx),>ee=%ymm2 vmovupd 352( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>pp=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 384 ] # asm 1: vmovupd 384(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 384(<input_2=%rdx),>ee=%ymm2 vmovupd 384( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 416 ] # asm 1: vmovupd 416(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 416(<input_1=%rsi),>pp=%ymm1 vmovupd 416( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 416 ] # asm 1: vmovupd 416(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 416(<input_2=%rdx),>ee=%ymm2 vmovupd 416( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 448 ] # asm 1: vmovupd 448(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 448(<input_1=%rsi),>pp=%ymm1 vmovupd 448( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 448 ] # asm 1: vmovupd 448(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 448(<input_2=%rdx),>ee=%ymm2 vmovupd 448( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 480 ] # asm 1: vmovupd 480(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 480(<input_1=%rsi),>pp=%ymm1 vmovupd 480( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 480 ] # asm 1: vmovupd 480(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 480(<input_2=%rdx),>ee=%ymm2 vmovupd 480( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % 
ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 512 ] # asm 1: vmovupd 512(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 512(<input_1=%rsi),>pp=%ymm1 vmovupd 512( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 512 ] # asm 1: vmovupd 512(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 512(<input_2=%rdx),>ee=%ymm2 vmovupd 512( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 544 ] # asm 1: vmovupd 544(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 544(<input_1=%rsi),>pp=%ymm1 vmovupd 544( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 544 ] # asm 1: vmovupd 544(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 544(<input_2=%rdx),>ee=%ymm2 vmovupd 544( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 576 ] # asm 1: vmovupd 576(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 576(<input_1=%rsi),>pp=%ymm1 vmovupd 576( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 576 ] # asm 1: vmovupd 576(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 576(<input_2=%rdx),>ee=%ymm2 vmovupd 576( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 608 ] # asm 1: vmovupd 608(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 608(<input_1=%rsi),>pp=%ymm1 vmovupd 608( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 608 ] # asm 1: vmovupd 608(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 608(<input_2=%rdx),>ee=%ymm2 vmovupd 608( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 640 ] # asm 1: vmovupd 640(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 640(<input_1=%rsi),>pp=%ymm1 vmovupd 640( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 640 ] # asm 1: vmovupd 640(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 640(<input_2=%rdx),>ee=%ymm2 vmovupd 640( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: buf = ss # asm 1: vmovapd <ss=reg256#1,>buf=stack256#1 # asm 2: vmovapd <ss=%ymm0,>buf=0(%rsp) vmovapd % ymm0, 0( % rsp) # qhasm: s = *(uint32 *) (input_1 + 672) # asm 1: movl 672(<input_1=int64#2),>s=int64#6d # asm 2: movl 672(<input_1=%rsi),>s=%r9d movl 672( % rsi), % r9d # 
qhasm: e = *(uint32 *) (input_2 + 672) # asm 1: movl 672(<input_2=int64#3),>e=int64#7d # asm 2: movl 672(<input_2=%rdx),>e=%eax movl 672( % rdx), % eax # qhasm: s &= e # asm 1: and <e=int64#7,<s=int64#6 # asm 2: and <e=%rax,<s=%r9 and % rax, % r9 # qhasm: p = *(uint8 *) (input_1 + 676) # asm 1: movzbq 676(<input_1=int64#2),>p=int64#7 # asm 2: movzbq 676(<input_1=%rsi),>p=%rax movzbq 676( % rsi), % rax # qhasm: e = *(uint8 *) (input_2 + 676) # asm 1: movzbq 676(<input_2=int64#3),>e=int64#8 # asm 2: movzbq 676(<input_2=%rdx),>e=%r10 movzbq 676( % rdx), % r10 # qhasm: p &= e # asm 1: and <e=int64#8,<p=int64#7 # asm 2: and <e=%r10,<p=%rax and % r10, % rax # qhasm: s ^= p # asm 1: xor <p=int64#7,<s=int64#6 # asm 2: xor <p=%rax,<s=%r9 xor % rax, % r9 # qhasm: c_all = count(s) # asm 1: popcnt <s=int64#6, >c_all=int64#6 # asm 2: popcnt <s=%r9, >c_all=%r9 popcnt % r9, % r9 # qhasm: b64 = mem64[ buf_ptr + 0 ] # asm 1: movq 0(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 0(<buf_ptr=%rcx),>b64=%rax movq 0( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 8 ] # asm 1: movq 8(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 8(<buf_ptr=%rcx),>b64=%rax movq 8( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 16 ] # asm 1: movq 16(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 16(<buf_ptr=%rcx),>b64=%rax movq 16( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 24 ] # asm 1: movq 24(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 24(<buf_ptr=%rcx),>b64=%rax movq 24( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: addr = row # asm 1: mov <row=int64#5,>addr=int64#7 # asm 2: mov <row=%r8,>addr=%rax mov % r8, % rax # qhasm: (uint64) addr >>= 3 # asm 1: shr $3,<addr=int64#7 # asm 2: shr $3,<addr=%rax shr $3, % rax # qhasm: addr += input_0 # asm 1: add <input_0=int64#1,<addr=int64#7 # asm 2: add <input_0=%rdi,<addr=%rax add % rdi, % rax # qhasm: synd = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#7),>synd=int64#8 # asm 2: movzbq 0(<addr=%rax),>synd=%r10 movzbq 0( % rax), % r10 # qhasm: synd <<= 1 # asm 1: shl $1,<synd=int64#8 # asm 2: shl $1,<synd=%r10 shl $1, % r10 # qhasm: (uint32) c_all &= 1 # asm 1: and $1,<c_all=int64#6d # asm 2: and $1,<c_all=%r9d and $1, % r9d # qhasm: synd |= c_all # asm 1: or <c_all=int64#6,<synd=int64#8 # asm 2: or <c_all=%r9,<synd=%r10 or % r9, % r10 # qhasm: *(uint8 *) (addr + 0) = synd # asm 1: movb <synd=int64#8b,0(<addr=int64#7) # asm 2: movb <synd=%r10b,0(<addr=%rax) movb % r10b, 0( % rax) # qhasm: =? 
row-0 # asm 1: cmp $0,<row=int64#5 # asm 2: cmp $0,<row=%r8 cmp $0, % r8 # comment:fp stack unchanged by jump # qhasm: goto loop if != jne ._loop # qhasm: i = 676 # asm 1: mov $676,>i=int64#2 # asm 2: mov $676,>i=%rsi mov $676, % rsi # qhasm: inner2: ._inner2: # qhasm: i -= 1 # asm 1: sub $1,<i=int64#2 # asm 2: sub $1,<i=%rsi sub $1, % rsi # qhasm: addr = input_2 + i # asm 1: lea (<input_2=int64#3,<i=int64#2),>addr=int64#4 # asm 2: lea (<input_2=%rdx,<i=%rsi),>addr=%rcx lea ( % rdx, % rsi), % rcx # qhasm: b0 = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#4),>b0=int64#5 # asm 2: movzbq 0(<addr=%rcx),>b0=%r8 movzbq 0( % rcx), % r8 # qhasm: b1 = *(uint8 *) (addr + 1) # asm 1: movzbq 1(<addr=int64#4),>b1=int64#6 # asm 2: movzbq 1(<addr=%rcx),>b1=%r9 movzbq 1( % rcx), % r9 # qhasm: (uint64) b0 >>= 5 # asm 1: shr $5,<b0=int64#5 # asm 2: shr $5,<b0=%r8 shr $5, % r8 # qhasm: b1 <<= 3 # asm 1: shl $3,<b1=int64#6 # asm 2: shl $3,<b1=%r9 shl $3, % r9 # qhasm: b1 |= b0 # asm 1: or <b0=int64#5,<b1=int64#6 # asm 2: or <b0=%r8,<b1=%r9 or % r8, % r9 # qhasm: *(uint8 *) (addr + 1) = b1 # asm 1: movb <b1=int64#6b,1(<addr=int64#4) # asm 2: movb <b1=%r9b,1(<addr=%rcx) movb % r9b, 1( % rcx) # qhasm: =? i-0 # asm 1: cmp $0,<i=int64#2 # asm 2: cmp $0,<i=%rsi cmp $0, % rsi # comment:fp stack unchanged by jump # qhasm: goto inner2 if != jne ._inner2 # qhasm: tmp = back # asm 1: movq <back=stack64#1,>tmp=int64#2 # asm 2: movq <back=32(%rsp),>tmp=%rsi movq 32( % rsp), % rsi # qhasm: *(uint8 *) (input_2 + 0) = tmp # asm 1: movb <tmp=int64#2b,0(<input_2=int64#3) # asm 2: movb <tmp=%sil,0(<input_2=%rdx) movb % sil, 0( % rdx) # qhasm: input_2 -= 193 # asm 1: sub $193,<input_2=int64#3 # asm 2: sub $193,<input_2=%rdx sub $193, % rdx # qhasm: i = 0 # asm 1: mov $0,>i=int64#2 # asm 2: mov $0,>i=%rsi mov $0, % rsi # qhasm: inner3: ._inner3: # qhasm: s = *(uint8 *) (input_0 + 0) # asm 1: movzbq 0(<input_0=int64#1),>s=int64#4 # asm 2: movzbq 0(<input_0=%rdi),>s=%rcx movzbq 0( % rdi), % rcx # qhasm: e = *(uint8 *) (input_2 + 0) # asm 1: movzbq 0(<input_2=int64#3),>e=int64#5 # asm 2: movzbq 0(<input_2=%rdx),>e=%r8 movzbq 0( % rdx), % r8 # qhasm: s ^= e # asm 1: xor <e=int64#5,<s=int64#4 # asm 2: xor <e=%r8,<s=%rcx xor % r8, % rcx # qhasm: *(uint8 *) (input_0 + 0) = s # asm 1: movb <s=int64#4b,0(<input_0=int64#1) # asm 2: movb <s=%cl,0(<input_0=%rdi) movb % cl, 0( % rdi) # qhasm: i += 1 # asm 1: add $1,<i=int64#2 # asm 2: add $1,<i=%rsi add $1, % rsi # qhasm: input_0 += 1 # asm 1: add $1,<input_0=int64#1 # asm 2: add $1,<input_0=%rdi add $1, % rdi # qhasm: input_2 += 1 # asm 1: add $1,<input_2=int64#3 # asm 2: add $1,<input_2=%rdx add $1, % rdx # qhasm: =? i-193 # asm 1: cmp $193,<i=int64#2 # asm 2: cmp $193,<i=%rsi cmp $193, % rsi # comment:fp stack unchanged by jump # qhasm: goto inner3 if != jne ._inner3 # qhasm: s = *(uint8 *) (input_0 + 0) # asm 1: movzbq 0(<input_0=int64#1),>s=int64#2 # asm 2: movzbq 0(<input_0=%rdi),>s=%rsi movzbq 0( % rdi), % rsi # qhasm: e = *(uint8 *) (input_2 + 0) # asm 1: movzbq 0(<input_2=int64#3),>e=int64#3 # asm 2: movzbq 0(<input_2=%rdx),>e=%rdx movzbq 0( % rdx), % rdx # qhasm: (uint32) e &= 7 # asm 1: and $7,<e=int64#3d # asm 2: and $7,<e=%edx and $7, % edx # qhasm: s ^= e # asm 1: xor <e=int64#3,<s=int64#2 # asm 2: xor <e=%rdx,<s=%rsi xor % rdx, % rsi # qhasm: *(uint8 *) (input_0 + 0) = s # asm 1: movb <s=int64#2b,0(<input_0=int64#1) # asm 2: movb <s=%sil,0(<input_0=%rdi) movb % sil, 0( % rdi) # qhasm: return add % r11, % rsp ret
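Note on the row loop in syndrome_asm above: each iteration computes one bit of the syndrome as the GF(2) dot product of a packed matrix row with the packed error vector — vpand/vpxor over 32-byte AVX2 lanes plus a scalar tail, reduced with popcnt, keeping only the parity bit. The following is a minimal scalar sketch of that pattern; the function and parameter names (`gf2_row_dot`, `row_bytes`) are illustrative assumptions, not the PQClean API.

```c
#include <stddef.h>
#include <stdint.h>

/*
 * Scalar analogue of the vpand/vpxor/popcnt loop in syndrome_asm:
 * one syndrome bit = parity of popcount(row AND e), i.e. the GF(2)
 * dot product of one packed matrix row with the packed error vector.
 */
static uint8_t gf2_row_dot(const uint8_t *row, const uint8_t *e,
                           size_t row_bytes)
{
    uint8_t acc = 0;
    for (size_t i = 0; i < row_bytes; i++)
        acc ^= (uint8_t)(row[i] & e[i]);  /* AND, then XOR-accumulate */
    acc ^= acc >> 4;                      /* fold the byte down to */
    acc ^= acc >> 2;                      /* its parity bit        */
    acc ^= acc >> 1;
    return acc & 1;                       /* one bit of the syndrome */
}
```

The assembly packs these bits without a separate store per bit: as `row` counts down it loads the destination byte at `input_0 + row/8`, shifts it left by one, and ORs in the fresh parity (the `shl $1` / `or` pair above), so eight consecutive rows fill one output byte.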
mktmansour/MKT-KSA-Geolocation-Security
254,430
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119/avx2/transpose_64x128_sp_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x128_sp_asm CRYPTO_NAMESPACE(transpose_64x128_sp_asm) #define _transpose_64x128_sp_asm _CRYPTO_NAMESPACE(transpose_64x128_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 x0 # qhasm: reg128 x1 # qhasm: reg128 x2 # qhasm: reg128 x3 # qhasm: reg128 x4 # qhasm: reg128 x5 # qhasm: reg128 x6 # qhasm: reg128 x7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x128_sp_asm .p2align 5 .global _transpose_64x128_sp_asm .global transpose_64x128_sp_asm _transpose_64x128_sp_asm: transpose_64x128_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: x0 = mem128[ input_0 + 0 ] # asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6 movdqu 0( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 128 ] # asm 1: movdqu 128(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 128(<input_0=%rdi),>x1=%xmm7 movdqu 128( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 256 ] # asm 1: movdqu 
256(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 256(<input_0=%rdi),>x2=%xmm8 movdqu 256( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 384 ] # asm 1: movdqu 384(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 384(<input_0=%rdi),>x3=%xmm9 movdqu 384( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 512 ] # asm 1: movdqu 512(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 512(<input_0=%rdi),>x4=%xmm10 movdqu 512( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 640(<input_0=%rdi),>x5=%xmm11 movdqu 640( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 768(<input_0=%rdi),>x6=%xmm12 movdqu 768( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 896(<input_0=%rdi),>x7=%xmm13 movdqu 896( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % 
xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw 
$8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 0 ] = x0 # asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi) movdqu % xmm9, 0( % rdi) # qhasm: mem128[ input_0 + 128 ] = x1 # asm 1: movdqu <x1=reg128#14,128(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,128(<input_0=%rdi) movdqu % xmm13, 128( % rdi) # qhasm: mem128[ input_0 + 256 ] = x2 # asm 1: movdqu <x2=reg128#15,256(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,256(<input_0=%rdi) movdqu % xmm14, 256( % rdi) # qhasm: mem128[ input_0 + 384 ] = x3 # asm 1: movdqu <x3=reg128#11,384(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,384(<input_0=%rdi) movdqu % xmm10, 384( % rdi) # qhasm: mem128[ input_0 + 512 ] = x4 # asm 1: movdqu <x4=reg128#12,512(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,512(<input_0=%rdi) movdqu % xmm11, 512( % rdi) # qhasm: mem128[ input_0 + 640 ] = x5 # asm 1: movdqu <x5=reg128#9,640(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,640(<input_0=%rdi) movdqu % xmm8, 640( % rdi) # qhasm: mem128[ input_0 + 768 ] = x6 # asm 1: movdqu <x6=reg128#13,768(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,768(<input_0=%rdi) movdqu % xmm12, 768( % rdi) # qhasm: mem128[ input_0 + 896 ] = x7 # asm 1: movdqu <x7=reg128#7,896(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,896(<input_0=%rdi) movdqu % xmm6, 896( % rdi) # qhasm: x0 = mem128[ input_0 + 16 ] # asm 1: movdqu 16(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 16(<input_0=%rdi),>x0=%xmm6 movdqu 16( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 144 ] # asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7 movdqu 144( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 272 ] # asm 1: movdqu 272(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 272(<input_0=%rdi),>x2=%xmm8 movdqu 272( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 400 ] # asm 1: movdqu 400(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 
400(<input_0=%rdi),>x3=%xmm9 movdqu 400( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 528 ] # asm 1: movdqu 528(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 528(<input_0=%rdi),>x4=%xmm10 movdqu 528( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 656 ] # asm 1: movdqu 656(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 656(<input_0=%rdi),>x5=%xmm11 movdqu 656( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 784 ] # asm 1: movdqu 784(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 784(<input_0=%rdi),>x6=%xmm12 movdqu 784( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 912 ] # asm 1: movdqu 912(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 912(<input_0=%rdi),>x7=%xmm13 movdqu 912( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % 
xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw 
$8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 16 ] = x0 # asm 1: movdqu <x0=reg128#10,16(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,16(<input_0=%rdi) movdqu % xmm9, 16( % rdi) # qhasm: mem128[ input_0 + 144 ] = x1 # asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi) movdqu % xmm13, 144( % rdi) # qhasm: mem128[ input_0 + 272 ] = x2 # asm 1: movdqu <x2=reg128#15,272(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,272(<input_0=%rdi) movdqu % xmm14, 272( % rdi) # qhasm: mem128[ input_0 + 400 ] = x3 # asm 1: movdqu <x3=reg128#11,400(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,400(<input_0=%rdi) movdqu % xmm10, 400( % rdi) # qhasm: mem128[ input_0 + 528 ] = x4 # asm 1: movdqu <x4=reg128#12,528(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,528(<input_0=%rdi) movdqu % xmm11, 528( % rdi) # qhasm: mem128[ input_0 + 656 ] = x5 # asm 1: movdqu <x5=reg128#9,656(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,656(<input_0=%rdi) movdqu % xmm8, 656( % rdi) # qhasm: mem128[ input_0 + 784 ] = x6 # asm 1: movdqu <x6=reg128#13,784(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,784(<input_0=%rdi) movdqu % xmm12, 784( % rdi) # qhasm: mem128[ input_0 + 912 ] = x7 # asm 1: movdqu <x7=reg128#7,912(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,912(<input_0=%rdi) movdqu % xmm6, 912( % rdi) # qhasm: x0 = mem128[ input_0 + 32 ] # asm 1: movdqu 32(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 32(<input_0=%rdi),>x0=%xmm6 movdqu 32( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 160 ] # asm 1: movdqu 160(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 160(<input_0=%rdi),>x1=%xmm7 movdqu 160( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 288 ] # asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8 movdqu 288( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 416 ] # asm 1: movdqu 416(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 416(<input_0=%rdi),>x3=%xmm9 movdqu 416( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 544 ] # asm 1: movdqu 544(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 544(<input_0=%rdi),>x4=%xmm10 movdqu 544( % rdi), % xmm10 # qhasm: x5 = 
mem128[ input_0 + 672 ] # asm 1: movdqu 672(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 672(<input_0=%rdi),>x5=%xmm11 movdqu 672( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 800 ] # asm 1: movdqu 800(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 800(<input_0=%rdi),>x6=%xmm12 movdqu 800( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 928 ] # asm 1: movdqu 928(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 928(<input_0=%rdi),>x7=%xmm13 movdqu 928( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq 
$32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor 
<v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 32 ] = x0
# asm 1: movdqu <x0=reg128#10,32(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,32(<input_0=%rdi)
movdqu % xmm9, 32( % rdi)

# qhasm: mem128[ input_0 + 160 ] = x1
# asm 1: movdqu <x1=reg128#14,160(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,160(<input_0=%rdi)
movdqu % xmm13, 160( % rdi)

# qhasm: mem128[ input_0 + 288 ] = x2
# asm 1: movdqu <x2=reg128#15,288(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,288(<input_0=%rdi)
movdqu % xmm14, 288( % rdi)

# qhasm: mem128[ input_0 + 416 ] = x3
# asm 1: movdqu <x3=reg128#11,416(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,416(<input_0=%rdi)
movdqu % xmm10, 416( % rdi)

# qhasm: mem128[ input_0 + 544 ] = x4
# asm 1: movdqu <x4=reg128#12,544(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,544(<input_0=%rdi)
movdqu % xmm11, 544( % rdi)

# qhasm: mem128[ input_0 + 672 ] = x5
# asm 1: movdqu <x5=reg128#9,672(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,672(<input_0=%rdi)
movdqu % xmm8, 672( % rdi)

# qhasm: mem128[ input_0 + 800 ] = x6
# asm 1: movdqu <x6=reg128#13,800(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,800(<input_0=%rdi)
movdqu % xmm12, 800( % rdi)

# qhasm: mem128[ input_0 + 928 ] = x7
# asm 1: movdqu <x7=reg128#7,928(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,928(<input_0=%rdi)
movdqu % xmm6, 928( % rdi)

# qhasm: x0 = mem128[ input_0 + 48 ]
# asm 1: movdqu 48(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 48(<input_0=%rdi),>x0=%xmm6
movdqu 48( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 176 ]
# asm 1: movdqu 176(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 176(<input_0=%rdi),>x1=%xmm7
movdqu 176( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 304 ]
# asm 1: movdqu 304(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 304(<input_0=%rdi),>x2=%xmm8
movdqu 304( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 432 ]
# asm 1: movdqu 432(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 432(<input_0=%rdi),>x3=%xmm9
movdqu 432( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 560 ]
# asm 1: movdqu 560(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 560(<input_0=%rdi),>x4=%xmm10
movdqu 560( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 688 ]
# asm 1: movdqu 688(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 688(<input_0=%rdi),>x5=%xmm11
movdqu 688( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 816 ]
# asm 1: movdqu 816(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 816(<input_0=%rdi),>x6=%xmm12
movdqu 816( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 944 ]
# asm 1: movdqu 944(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 944(<input_0=%rdi),>x7=%xmm13
movdqu 944( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 48 ] = x0
# asm 1: movdqu <x0=reg128#10,48(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,48(<input_0=%rdi)
movdqu % xmm9, 48( % rdi)

# qhasm: mem128[ input_0 + 176 ] = x1
# asm 1: movdqu <x1=reg128#14,176(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,176(<input_0=%rdi)
movdqu % xmm13, 176( % rdi)

# qhasm: mem128[ input_0 + 304 ] = x2
# asm 1: movdqu <x2=reg128#15,304(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,304(<input_0=%rdi)
movdqu % xmm14, 304( % rdi)

# qhasm: mem128[ input_0 + 432 ] = x3
# asm 1: movdqu <x3=reg128#11,432(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,432(<input_0=%rdi)
movdqu % xmm10, 432( % rdi)

# qhasm: mem128[ input_0 + 560 ] = x4
# asm 1: movdqu <x4=reg128#12,560(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,560(<input_0=%rdi)
movdqu % xmm11, 560( % rdi)

# qhasm: mem128[ input_0 + 688 ] = x5
# asm 1: movdqu <x5=reg128#9,688(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,688(<input_0=%rdi)
movdqu % xmm8, 688( % rdi)

# qhasm: mem128[ input_0 + 816 ] = x6
# asm 1: movdqu <x6=reg128#13,816(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,816(<input_0=%rdi)
movdqu % xmm12, 816( % rdi)

# qhasm: mem128[ input_0 + 944 ] = x7
# asm 1: movdqu <x7=reg128#7,944(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,944(<input_0=%rdi)
movdqu % xmm6, 944( % rdi)

# qhasm: x0 = mem128[ input_0 + 64 ]
# asm 1: movdqu 64(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 64(<input_0=%rdi),>x0=%xmm6
movdqu 64( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 192 ]
# asm 1: movdqu 192(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 192(<input_0=%rdi),>x1=%xmm7
movdqu 192( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 320 ]
# asm 1: movdqu 320(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 320(<input_0=%rdi),>x2=%xmm8
movdqu 320( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 448 ]
# asm 1: movdqu 448(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 448(<input_0=%rdi),>x3=%xmm9
movdqu 448( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 576 ]
# asm 1: movdqu 576(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 576(<input_0=%rdi),>x4=%xmm10
movdqu 576( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 704 ]
# asm 1: movdqu 704(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 704(<input_0=%rdi),>x5=%xmm11
movdqu 704( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 832 ]
# asm 1: movdqu 832(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 832(<input_0=%rdi),>x6=%xmm12
movdqu 832( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 960 ]
# asm 1: movdqu 960(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 960(<input_0=%rdi),>x7=%xmm13
movdqu 960( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 64 ] = x0
# asm 1: movdqu <x0=reg128#10,64(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,64(<input_0=%rdi)
movdqu % xmm9, 64( % rdi)

# qhasm: mem128[ input_0 + 192 ] = x1
# asm 1: movdqu <x1=reg128#14,192(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,192(<input_0=%rdi)
movdqu % xmm13, 192( % rdi)

# qhasm: mem128[ input_0 + 320 ] = x2
# asm 1: movdqu <x2=reg128#15,320(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,320(<input_0=%rdi)
movdqu % xmm14, 320( % rdi)

# qhasm: mem128[ input_0 + 448 ] = x3
# asm 1: movdqu <x3=reg128#11,448(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,448(<input_0=%rdi)
movdqu % xmm10, 448( % rdi)

# qhasm: mem128[ input_0 + 576 ] = x4
# asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi)
movdqu % xmm11, 576( % rdi)

# qhasm: mem128[ input_0 + 704 ] = x5
# asm 1: movdqu <x5=reg128#9,704(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,704(<input_0=%rdi)
movdqu % xmm8, 704( % rdi)

# qhasm: mem128[ input_0 + 832 ] = x6
# asm 1: movdqu <x6=reg128#13,832(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,832(<input_0=%rdi)
movdqu % xmm12, 832( % rdi)

# qhasm: mem128[ input_0 + 960 ] = x7
# asm 1: movdqu <x7=reg128#7,960(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,960(<input_0=%rdi)
movdqu % xmm6, 960( % rdi)

# qhasm: x0 = mem128[ input_0 + 80 ]
# asm 1: movdqu 80(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 80(<input_0=%rdi),>x0=%xmm6
movdqu 80( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 208 ]
# asm 1: movdqu 208(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 208(<input_0=%rdi),>x1=%xmm7
movdqu 208( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 336 ]
# asm 1: movdqu 336(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 336(<input_0=%rdi),>x2=%xmm8
movdqu 336( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 464 ]
# asm 1: movdqu 464(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 464(<input_0=%rdi),>x3=%xmm9
movdqu 464( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 592 ]
# asm 1: movdqu 592(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 592(<input_0=%rdi),>x4=%xmm10
movdqu 592( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 720 ]
# asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11
movdqu 720( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 848 ]
# asm 1: movdqu 848(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 848(<input_0=%rdi),>x6=%xmm12
movdqu 848( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 976 ]
# asm 1: movdqu 976(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 976(<input_0=%rdi),>x7=%xmm13
movdqu 976( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 80 ] = x0
# asm 1: movdqu <x0=reg128#10,80(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,80(<input_0=%rdi)
movdqu % xmm9, 80( % rdi)

# qhasm: mem128[ input_0 + 208 ] = x1
# asm 1: movdqu <x1=reg128#14,208(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,208(<input_0=%rdi)
movdqu % xmm13, 208( % rdi)

# qhasm: mem128[ input_0 + 336 ] = x2
# asm 1: movdqu <x2=reg128#15,336(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,336(<input_0=%rdi)
movdqu % xmm14, 336( % rdi)

# qhasm: mem128[ input_0 + 464 ] = x3
# asm 1: movdqu <x3=reg128#11,464(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,464(<input_0=%rdi)
movdqu % xmm10, 464( % rdi)

# qhasm: mem128[ input_0 + 592 ] = x4
# asm 1: movdqu <x4=reg128#12,592(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,592(<input_0=%rdi)
movdqu % xmm11, 592( % rdi)

# qhasm: mem128[ input_0 + 720 ] = x5
# asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi)
movdqu % xmm8, 720( % rdi)

# qhasm: mem128[ input_0 + 848 ] = x6
# asm 1: movdqu <x6=reg128#13,848(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,848(<input_0=%rdi)
movdqu % xmm12, 848( % rdi)

# qhasm: mem128[ input_0 + 976 ] = x7
# asm 1: movdqu <x7=reg128#7,976(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,976(<input_0=%rdi)
movdqu % xmm6, 976( % rdi)

# qhasm: x0 = mem128[ input_0 + 96 ]
# asm 1: movdqu 96(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 96(<input_0=%rdi),>x0=%xmm6
movdqu 96( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 224 ]
# asm 1: movdqu 224(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 224(<input_0=%rdi),>x1=%xmm7
movdqu 224( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 352 ]
# asm 1: movdqu 352(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 352(<input_0=%rdi),>x2=%xmm8
movdqu 352( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 480 ]
# asm 1: movdqu 480(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 480(<input_0=%rdi),>x3=%xmm9
movdqu 480( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 608 ]
# asm 1: movdqu 608(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 608(<input_0=%rdi),>x4=%xmm10
movdqu 608( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 736 ]
# asm 1: movdqu 736(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 736(<input_0=%rdi),>x5=%xmm11
movdqu 736( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 864 ]
# asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12
movdqu 864( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 992 ]
# asm 1: movdqu 992(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 992(<input_0=%rdi),>x7=%xmm13
movdqu 992( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 96 ] = x0
# asm 1: movdqu <x0=reg128#10,96(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,96(<input_0=%rdi)
movdqu % xmm9, 96( % rdi)

# qhasm: mem128[ input_0 + 224 ] = x1
# asm 1: movdqu <x1=reg128#14,224(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,224(<input_0=%rdi)
movdqu % xmm13, 224( % rdi)

# qhasm: mem128[ input_0 + 352 ] = x2
# asm 1: movdqu <x2=reg128#15,352(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,352(<input_0=%rdi)
movdqu % xmm14, 352( % rdi)

# qhasm: mem128[ input_0 + 480 ] = x3
# asm 1: movdqu <x3=reg128#11,480(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,480(<input_0=%rdi)
movdqu % xmm10, 480( % rdi)

# qhasm: mem128[ input_0 + 608 ] = x4
# asm 1: movdqu <x4=reg128#12,608(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,608(<input_0=%rdi)
movdqu % xmm11, 608( % rdi)

# qhasm: mem128[ input_0 + 736 ] = x5
# asm 1: movdqu <x5=reg128#9,736(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,736(<input_0=%rdi)
movdqu % xmm8, 736( % rdi)

# qhasm: mem128[ input_0 + 864 ] = x6
# asm 1: movdqu <x6=reg128#13,864(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi)
movdqu % xmm12, 864( % rdi)

# qhasm: mem128[ input_0 + 992 ] = x7
# asm 1: movdqu <x7=reg128#7,992(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,992(<input_0=%rdi)
movdqu % xmm6, 992( % rdi)

# qhasm: x0 = mem128[ input_0 + 112 ]
# asm 1: movdqu 112(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 112(<input_0=%rdi),>x0=%xmm6
movdqu 112( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 240 ]
# asm 1: movdqu 240(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 240(<input_0=%rdi),>x1=%xmm7
movdqu 240( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 368 ]
# asm 1: movdqu 368(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 368(<input_0=%rdi),>x2=%xmm8
movdqu 368( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 496 ]
# asm 1: movdqu 496(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 496(<input_0=%rdi),>x3=%xmm9
movdqu 496( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 624 ]
# asm 1: movdqu 624(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 624(<input_0=%rdi),>x4=%xmm10
movdqu 624( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 752 ]
# asm 1: movdqu 752(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 752(<input_0=%rdi),>x5=%xmm11
movdqu 752( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 880 ]
# asm 1: movdqu 880(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 880(<input_0=%rdi),>x6=%xmm12
movdqu 880( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 1008 ]
# asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13
movdqu 1008( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#1
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm0
vpand % xmm0, % xmm9, % xmm0

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#13
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm12
vpsllq $32, % xmm13, % xmm12

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1
vpand % xmm1, % xmm13, % xmm1

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#1,>x3=reg128#1
# asm 2: vpor <v10=%xmm12,<v00=%xmm0,>x3=%xmm0
vpor % xmm12, % xmm0, % xmm0

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1
vpor % xmm1, % xmm9, % xmm1

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9
vpand % xmm2, % xmm14, % xmm9

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#13
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm12
vpslld $16, % xmm11, % xmm12

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#14
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm13
vpsrld $16, % xmm14, % xmm13

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9
vpor % xmm12, % xmm9, % xmm9

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11
vpor % xmm11, % xmm13, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12
vpand % xmm2, % xmm10, % xmm12

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#1,>v10=reg128#14
# asm 2: vpslld $16,<x3=%xmm0,>v10=%xmm13
vpslld $16, % xmm0, % xmm13

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0
vpand % xmm3, % xmm0, % xmm0

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13
# asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12
vpor % xmm13, % xmm12, % xmm12

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0
vpor % xmm0, % xmm10, % xmm0

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10
vpand % xmm2, % xmm6, % xmm10

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#14
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm13
vpslld $16, % xmm8, % xmm13

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11
# asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10
vpor % xmm13, % xmm10, % xmm10

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#3
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm2
vpand % xmm2, % xmm7, % xmm2

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#2,>v10=reg128#9
# asm 2: vpslld $16,<x7=%xmm1,>v10=%xmm8
vpslld $16, % xmm1, % xmm8

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1
vpand % xmm3, % xmm1, % xmm1

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#9,<v00=reg128#3,>x5=reg128#3
# asm 2: vpor <v10=%xmm8,<v00=%xmm2,>x5=%xmm2
vpor % xmm8, % xmm2, % xmm2

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1
vpor % xmm1, % xmm7, % xmm1

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4
# asm 2: vpand
<mask4=%xmm4,<x0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#13,>v10=reg128#8 # asm 2: vpsllw $8,<x1=%xmm12,>v10=%xmm7 vpsllw $8, % xmm12, % xmm7 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#10,>v01=reg128#9 # asm 2: vpsrlw $8,<x0=%xmm9,>v01=%xmm8 vpsrlw $8, % xmm9, % xmm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#1,>v10=reg128#10 # asm 2: vpsllw $8,<x3=%xmm0,>v10=%xmm9 vpsllw $8, % xmm0, % xmm9 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#3,>v10=reg128#12 # asm 2: vpsllw $8,<x5=%xmm2,>v10=%xmm11 vpsllw $8, % xmm2, % xmm11 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#11,>v01=reg128#11 # asm 2: vpsrlw $8,<x4=%xmm10,>v01=%xmm10 vpsrlw $8, % xmm10, % xmm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#5 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm4 vpand % xmm4, % xmm6, % xmm4 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#2,>v10=reg128#11 # asm 2: vpsllw $8,<x7=%xmm1,>v10=%xmm10 vpsllw $8, % xmm1, % xmm10 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#11,<v00=reg128#5,>x6=reg128#5 # asm 2: vpor <v10=%xmm10,<v00=%xmm4,>x6=%xmm4 vpor % xmm10, % xmm4, % xmm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2 # asm 2: vpor 
<v11=%xmm1,<v01=%xmm6,>x7=%xmm1
vpor % xmm1, % xmm6, % xmm1

# qhasm: mem128[ input_0 + 112 ] = x0
# asm 1: movdqu <x0=reg128#4,112(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm3,112(<input_0=%rdi)
movdqu % xmm3, 112( % rdi)

# qhasm: mem128[ input_0 + 240 ] = x1
# asm 1: movdqu <x1=reg128#8,240(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm7,240(<input_0=%rdi)
movdqu % xmm7, 240( % rdi)

# qhasm: mem128[ input_0 + 368 ] = x2
# asm 1: movdqu <x2=reg128#9,368(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm8,368(<input_0=%rdi)
movdqu % xmm8, 368( % rdi)

# qhasm: mem128[ input_0 + 496 ] = x3
# asm 1: movdqu <x3=reg128#1,496(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm0,496(<input_0=%rdi)
movdqu % xmm0, 496( % rdi)

# qhasm: mem128[ input_0 + 624 ] = x4
# asm 1: movdqu <x4=reg128#10,624(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm9,624(<input_0=%rdi)
movdqu % xmm9, 624( % rdi)

# qhasm: mem128[ input_0 + 752 ] = x5
# asm 1: movdqu <x5=reg128#3,752(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm2,752(<input_0=%rdi)
movdqu % xmm2, 752( % rdi)

# qhasm: mem128[ input_0 + 880 ] = x6
# asm 1: movdqu <x6=reg128#5,880(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm4,880(<input_0=%rdi)
movdqu % xmm4, 880( % rdi)

# qhasm: mem128[ input_0 + 1008 ] = x7
# asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi)
movdqu % xmm1, 1008( % rdi)

# qhasm: mask0 aligned= mem128[ MASK2_0 ]
# asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0
movdqa MASK2_0( % rip), % xmm0

# qhasm: mask1 aligned= mem128[ MASK2_1 ]
# asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1
movdqa MASK2_1( % rip), % xmm1

# qhasm: mask2 aligned= mem128[ MASK1_0 ]
# asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2
movdqa MASK1_0( % rip), % xmm2

# qhasm: mask3 aligned= mem128[ MASK1_1 ]
# asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3
movdqa MASK1_1( % rip), % xmm3

# qhasm: mask4 aligned= mem128[ MASK0_0 ]
# asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4
movdqa MASK0_0( % rip), % xmm4

# qhasm: mask5 aligned= mem128[ MASK0_1 ]
# asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5
movdqa MASK0_1( % rip), % xmm5

# qhasm: x0 = mem128[ input_0 + 0 ]
# asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6
movdqu 0( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 16 ]
# asm 1: movdqu 16(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 16(<input_0=%rdi),>x1=%xmm7
movdqu 16( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 32 ]
# asm 1: movdqu 32(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 32(<input_0=%rdi),>x2=%xmm8
movdqu 32( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 48 ]
# asm 1: movdqu 48(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 48(<input_0=%rdi),>x3=%xmm9
movdqu 48( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 64 ]
# asm 1: movdqu 64(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 64(<input_0=%rdi),>x4=%xmm10
movdqu 64( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 80 ]
# asm 1: movdqu 80(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 80(<input_0=%rdi),>x5=%xmm11
movdqu 80( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 96 ]
# asm 1: movdqu 96(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 96(<input_0=%rdi),>x6=%xmm12
movdqu 96( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 112 ]
# asm 1: movdqu 112(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu
112(<input_0=%rdi),>x7=%xmm13 movdqu 112( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % 
xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 
vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: 
vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 0 ] = x0
# asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi)
movdqu % xmm9, 0( % rdi)

# qhasm: mem128[ input_0 + 16 ] = x1
# asm 1: movdqu <x1=reg128#14,16(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,16(<input_0=%rdi)
movdqu % xmm13, 16( % rdi)

# qhasm: mem128[ input_0 + 32 ] = x2
# asm 1: movdqu <x2=reg128#15,32(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,32(<input_0=%rdi)
movdqu % xmm14, 32( % rdi)

# qhasm: mem128[ input_0 + 48 ] = x3
# asm 1: movdqu <x3=reg128#11,48(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,48(<input_0=%rdi)
movdqu % xmm10, 48( % rdi)

# qhasm: mem128[ input_0 + 64 ] = x4
# asm 1: movdqu <x4=reg128#12,64(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,64(<input_0=%rdi)
movdqu % xmm11, 64( % rdi)

# qhasm: mem128[ input_0 + 80 ] = x5
# asm 1: movdqu <x5=reg128#9,80(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,80(<input_0=%rdi)
movdqu % xmm8, 80( % rdi)

# qhasm: mem128[ input_0 + 96 ] = x6
# asm 1: movdqu <x6=reg128#13,96(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,96(<input_0=%rdi)
movdqu % xmm12, 96( % rdi)

# qhasm: mem128[ input_0 + 112 ] = x7
# asm 1: movdqu <x7=reg128#7,112(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,112(<input_0=%rdi)
movdqu % xmm6, 112( % rdi)

# qhasm: x0 = mem128[ input_0 + 128 ]
# asm 1: movdqu 128(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 128(<input_0=%rdi),>x0=%xmm6
movdqu 128( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 144 ]
# asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7
movdqu 144( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 160 ]
# asm 1: movdqu 160(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 160(<input_0=%rdi),>x2=%xmm8
movdqu 160( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 176 ]
# asm 1: movdqu 176(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 176(<input_0=%rdi),>x3=%xmm9
movdqu 176( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 192 ]
# asm 1: movdqu 192(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 192(<input_0=%rdi),>x4=%xmm10
movdqu 192( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 208 ]
# asm 1: movdqu 208(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 208(<input_0=%rdi),>x5=%xmm11
movdqu 208( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 224 ]
# asm 1: movdqu 224(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 224(<input_0=%rdi),>x6=%xmm12
movdqu 224( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 240 ]
# asm 1: movdqu 240(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 240(<input_0=%rdi),>x7=%xmm13
movdqu 240( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand % xmm0, % xmm10, % xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand % xmm1, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10,
% xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand 
<mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand 
<mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & 
mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 128 ] = x0 # asm 1: movdqu <x0=reg128#10,128(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,128(<input_0=%rdi) movdqu % xmm9, 128( % rdi) # qhasm: mem128[ input_0 + 144 ] = x1 # asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi) movdqu % xmm13, 144( % rdi) # qhasm: mem128[ input_0 + 160 ] = x2 # asm 1: movdqu <x2=reg128#15,160(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,160(<input_0=%rdi) movdqu % xmm14, 160( % rdi) # qhasm: mem128[ input_0 + 176 ] = x3 # asm 1: movdqu <x3=reg128#11,176(<input_0=int64#1) # asm 2: movdqu 
<x3=%xmm10,176(<input_0=%rdi)
movdqu % xmm10, 176( % rdi)

# qhasm: mem128[ input_0 + 192 ] = x4
# asm 1: movdqu <x4=reg128#12,192(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,192(<input_0=%rdi)
movdqu % xmm11, 192( % rdi)

# qhasm: mem128[ input_0 + 208 ] = x5
# asm 1: movdqu <x5=reg128#9,208(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,208(<input_0=%rdi)
movdqu % xmm8, 208( % rdi)

# qhasm: mem128[ input_0 + 224 ] = x6
# asm 1: movdqu <x6=reg128#13,224(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,224(<input_0=%rdi)
movdqu % xmm12, 224( % rdi)

# qhasm: mem128[ input_0 + 240 ] = x7
# asm 1: movdqu <x7=reg128#7,240(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,240(<input_0=%rdi)
movdqu % xmm6, 240( % rdi)

# qhasm: x0 = mem128[ input_0 + 256 ]
# asm 1: movdqu 256(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 256(<input_0=%rdi),>x0=%xmm6
movdqu 256( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 272 ]
# asm 1: movdqu 272(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 272(<input_0=%rdi),>x1=%xmm7
movdqu 272( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 288 ]
# asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8
movdqu 288( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 304 ]
# asm 1: movdqu 304(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 304(<input_0=%rdi),>x3=%xmm9
movdqu 304( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 320 ]
# asm 1: movdqu 320(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 320(<input_0=%rdi),>x4=%xmm10
movdqu 320( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 336 ]
# asm 1: movdqu 336(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 336(<input_0=%rdi),>x5=%xmm11
movdqu 336( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 352 ]
# asm 1: movdqu 352(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 352(<input_0=%rdi),>x6=%xmm12
movdqu 352( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 368 ]
# asm 1: movdqu 368(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 368(<input_0=%rdi),>x7=%xmm13
movdqu 368( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand % xmm0, % xmm10, % xmm15

# qhasm: 2x v10 <<=
4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % 
xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand 
<mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand 
<mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 256 ] = x0 # asm 1: movdqu <x0=reg128#10,256(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,256(<input_0=%rdi) movdqu % xmm9, 256( % rdi) # qhasm: mem128[ input_0 + 272 ] = x1 # asm 1: movdqu <x1=reg128#14,272(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,272(<input_0=%rdi) movdqu % xmm13, 272( % rdi) # qhasm: mem128[ input_0 + 288 ] = x2 # asm 1: movdqu <x2=reg128#15,288(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,288(<input_0=%rdi) movdqu % xmm14, 288( % rdi) # qhasm: mem128[ input_0 + 304 ] = x3 # asm 1: movdqu <x3=reg128#11,304(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,304(<input_0=%rdi) movdqu % xmm10, 304( % rdi) # qhasm: mem128[ input_0 + 320 ] = x4 # asm 1: movdqu <x4=reg128#12,320(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,320(<input_0=%rdi) movdqu % xmm11, 320( % rdi) # qhasm: mem128[ input_0 + 336 ] = x5 # asm 1: movdqu <x5=reg128#9,336(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,336(<input_0=%rdi) movdqu % xmm8, 336( % rdi) # qhasm: mem128[ input_0 + 352 ] = x6 # asm 1: movdqu <x6=reg128#13,352(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,352(<input_0=%rdi) movdqu % xmm12, 352( % rdi) # qhasm: mem128[ input_0 + 368 ] = x7 # asm 1: movdqu <x7=reg128#7,368(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,368(<input_0=%rdi) movdqu % xmm6, 368( % rdi) # qhasm: x0 = mem128[ input_0 + 384 ] # asm 1: movdqu 
384(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 384(<input_0=%rdi),>x0=%xmm6 movdqu 384( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 400 ] # asm 1: movdqu 400(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 400(<input_0=%rdi),>x1=%xmm7 movdqu 400( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 416 ] # asm 1: movdqu 416(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 416(<input_0=%rdi),>x2=%xmm8 movdqu 416( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 432 ] # asm 1: movdqu 432(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 432(<input_0=%rdi),>x3=%xmm9 movdqu 432( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 448 ] # asm 1: movdqu 448(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 448(<input_0=%rdi),>x4=%xmm10 movdqu 448( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 464 ] # asm 1: movdqu 464(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 464(<input_0=%rdi),>x5=%xmm11 movdqu 464( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 480 ] # asm 1: movdqu 480(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 480(<input_0=%rdi),>x6=%xmm12 movdqu 480( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 496 ] # asm 1: movdqu 496(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 496(<input_0=%rdi),>x7=%xmm13 movdqu 496( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 
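# The vpand/psllq/psrlq/vpor groups in this pass appear to implement one
# butterfly layer of an in-register bit transpose: mask0 and mask1 select
# complementary bit positions within each 64-bit lane, and the selected bits
# are exchanged between the vector pairs (x0,x4), (x1,x5), (x2,x6), (x3,x7)
# at a shift distance of 4. The following layers repeat the exchange with
# mask2/mask3 at shift 2 and mask4/mask5 at shift 1.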
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor 
<v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | 
v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 384 ] = x0 # asm 1: movdqu <x0=reg128#10,384(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,384(<input_0=%rdi) movdqu % xmm9, 384( % rdi) # qhasm: mem128[ input_0 + 400 ] = x1 # asm 1: movdqu <x1=reg128#14,400(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,400(<input_0=%rdi) movdqu % xmm13, 400( % rdi) # qhasm: mem128[ input_0 + 416 ] = x2 # asm 1: movdqu <x2=reg128#15,416(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,416(<input_0=%rdi) movdqu % xmm14, 416( % rdi) # qhasm: mem128[ input_0 + 432 ] = x3 # asm 1: movdqu <x3=reg128#11,432(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,432(<input_0=%rdi) movdqu % xmm10, 432( % rdi) # qhasm: mem128[ input_0 + 448 ] = x4 # asm 1: movdqu <x4=reg128#12,448(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,448(<input_0=%rdi) movdqu % xmm11, 448( % rdi) # qhasm: mem128[ input_0 + 464 ] = x5 # asm 1: movdqu <x5=reg128#9,464(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,464(<input_0=%rdi) movdqu % xmm8, 464( % rdi) # qhasm: mem128[ input_0 + 480 ] = x6 # asm 1: movdqu <x6=reg128#13,480(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,480(<input_0=%rdi) movdqu % xmm12, 480( % rdi) # qhasm: mem128[ input_0 + 496 ] = x7 # asm 1: movdqu <x7=reg128#7,496(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,496(<input_0=%rdi) movdqu % xmm6, 496( % rdi) # qhasm: x0 = mem128[ input_0 + 512 ] # asm 1: movdqu 512(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 512(<input_0=%rdi),>x0=%xmm6 movdqu 512( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 528 ] # asm 1: movdqu 528(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 528(<input_0=%rdi),>x1=%xmm7 movdqu 528( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 544 ] # asm 1: movdqu 544(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 544(<input_0=%rdi),>x2=%xmm8 movdqu 544( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 560 ] # asm 1: movdqu 560(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 560(<input_0=%rdi),>x3=%xmm9 movdqu 560( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 576 ] # asm 1: movdqu 576(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 576(<input_0=%rdi),>x4=%xmm10 movdqu 576( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 592 ] # 
asm 1: movdqu 592(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 592(<input_0=%rdi),>x5=%xmm11 movdqu 592( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 608 ] # asm 1: movdqu 608(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 608(<input_0=%rdi),>x6=%xmm12 movdqu 608( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 624 ] # asm 1: movdqu 624(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 624(<input_0=%rdi),>x7=%xmm13 movdqu 624( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand 
<mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand 
<mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 
1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % 
xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 512 ] = x0 # asm 1: movdqu <x0=reg128#10,512(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,512(<input_0=%rdi) movdqu % xmm9, 512( % rdi) # qhasm: mem128[ input_0 + 528 ] = x1 # asm 1: movdqu <x1=reg128#14,528(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,528(<input_0=%rdi) movdqu % xmm13, 528( % rdi) # qhasm: mem128[ input_0 + 544 ] = x2 # asm 1: movdqu <x2=reg128#15,544(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,544(<input_0=%rdi) movdqu % xmm14, 544( % rdi) # qhasm: mem128[ input_0 + 560 ] = x3 # asm 1: movdqu <x3=reg128#11,560(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,560(<input_0=%rdi) movdqu % xmm10, 560( % rdi) # qhasm: mem128[ input_0 + 576 ] = x4 # asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi) movdqu % xmm11, 576( % rdi) # qhasm: mem128[ input_0 + 592 ] = x5 # asm 1: movdqu <x5=reg128#9,592(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,592(<input_0=%rdi) movdqu % xmm8, 592( % rdi) # qhasm: mem128[ input_0 + 608 ] = x6 # asm 1: movdqu <x6=reg128#13,608(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,608(<input_0=%rdi) movdqu % xmm12, 608( % rdi) # qhasm: mem128[ input_0 + 624 ] = x7 # asm 1: movdqu <x7=reg128#7,624(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,624(<input_0=%rdi) movdqu % xmm6, 624( % rdi) # qhasm: x0 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 640(<input_0=%rdi),>x0=%xmm6 movdqu 640( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 656 ] # asm 1: movdqu 656(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 656(<input_0=%rdi),>x1=%xmm7 movdqu 656( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 672 ] # asm 1: movdqu 672(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 672(<input_0=%rdi),>x2=%xmm8 movdqu 672( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 688 ] # asm 1: movdqu 688(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 688(<input_0=%rdi),>x3=%xmm9 movdqu 688( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 704 ] # asm 1: movdqu 704(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 704(<input_0=%rdi),>x4=%xmm10 movdqu 704( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 720 ] # asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11 movdqu 720( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 736 ] # asm 1: movdqu 736(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 736(<input_0=%rdi),>x6=%xmm12 movdqu 736( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 752 ] # asm 1: movdqu 752(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 752(<input_0=%rdi),>x7=%xmm13 movdqu 752( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 
<<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % 
xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand 
<mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 
2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 640 ] = x0 # asm 1: movdqu <x0=reg128#10,640(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,640(<input_0=%rdi) movdqu % xmm9, 640( % rdi) # qhasm: mem128[ input_0 + 656 ] = x1 # asm 1: movdqu 
<x1=reg128#14,656(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,656(<input_0=%rdi) movdqu % xmm13, 656( % rdi) # qhasm: mem128[ input_0 + 672 ] = x2 # asm 1: movdqu <x2=reg128#15,672(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,672(<input_0=%rdi) movdqu % xmm14, 672( % rdi) # qhasm: mem128[ input_0 + 688 ] = x3 # asm 1: movdqu <x3=reg128#11,688(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,688(<input_0=%rdi) movdqu % xmm10, 688( % rdi) # qhasm: mem128[ input_0 + 704 ] = x4 # asm 1: movdqu <x4=reg128#12,704(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,704(<input_0=%rdi) movdqu % xmm11, 704( % rdi) # qhasm: mem128[ input_0 + 720 ] = x5 # asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi) movdqu % xmm8, 720( % rdi) # qhasm: mem128[ input_0 + 736 ] = x6 # asm 1: movdqu <x6=reg128#13,736(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,736(<input_0=%rdi) movdqu % xmm12, 736( % rdi) # qhasm: mem128[ input_0 + 752 ] = x7 # asm 1: movdqu <x7=reg128#7,752(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,752(<input_0=%rdi) movdqu % xmm6, 752( % rdi) # qhasm: x0 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 768(<input_0=%rdi),>x0=%xmm6 movdqu 768( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 784 ] # asm 1: movdqu 784(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 784(<input_0=%rdi),>x1=%xmm7 movdqu 784( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 800 ] # asm 1: movdqu 800(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 800(<input_0=%rdi),>x2=%xmm8 movdqu 800( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 816 ] # asm 1: movdqu 816(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 816(<input_0=%rdi),>x3=%xmm9 movdqu 816( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 832 ] # asm 1: movdqu 832(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 832(<input_0=%rdi),>x4=%xmm10 movdqu 832( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 848 ] # asm 1: movdqu 848(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 848(<input_0=%rdi),>x5=%xmm11 movdqu 848( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 864 ] # asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12 movdqu 864( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 880 ] # asm 1: movdqu 880(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 880(<input_0=%rdi),>x7=%xmm13 movdqu 880( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 
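# The same three exchange layers (mask0/mask1 at shift 4, mask2/mask3 at
# shift 2, mask4/mask5 at shift 1) appear to be applied to each 128-byte
# block of input_0 in turn; the loads above start the block at offset 768.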
vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor 
<v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 
= v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 768 ] = x0 # asm 1: movdqu <x0=reg128#10,768(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,768(<input_0=%rdi) movdqu % xmm9, 768( % rdi) # qhasm: mem128[ input_0 + 784 ] = x1 # asm 1: movdqu <x1=reg128#14,784(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,784(<input_0=%rdi) movdqu % xmm13, 784( % rdi) # qhasm: mem128[ input_0 + 800 ] = x2 # asm 1: movdqu <x2=reg128#15,800(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,800(<input_0=%rdi) movdqu % xmm14, 800( % rdi) # qhasm: mem128[ input_0 + 816 ] = x3 # asm 1: movdqu <x3=reg128#11,816(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,816(<input_0=%rdi) movdqu % xmm10, 816( % rdi) # qhasm: mem128[ input_0 + 832 ] = x4 # asm 1: movdqu <x4=reg128#12,832(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,832(<input_0=%rdi) movdqu % xmm11, 832( % rdi) # qhasm: mem128[ input_0 + 848 ] = x5 # asm 1: movdqu <x5=reg128#9,848(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,848(<input_0=%rdi) movdqu % xmm8, 848( % rdi) # qhasm: mem128[ input_0 + 864 ] = x6 # asm 1: 
movdqu <x6=reg128#13,864(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi) movdqu % xmm12, 864( % rdi) # qhasm: mem128[ input_0 + 880 ] = x7 # asm 1: movdqu <x7=reg128#7,880(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,880(<input_0=%rdi) movdqu % xmm6, 880( % rdi) # qhasm: x0 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 896(<input_0=%rdi),>x0=%xmm6 movdqu 896( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 912 ] # asm 1: movdqu 912(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 912(<input_0=%rdi),>x1=%xmm7 movdqu 912( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 928 ] # asm 1: movdqu 928(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 928(<input_0=%rdi),>x2=%xmm8 movdqu 928( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 944 ] # asm 1: movdqu 944(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 944(<input_0=%rdi),>x3=%xmm9 movdqu 944( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 960 ] # asm 1: movdqu 960(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 960(<input_0=%rdi),>x4=%xmm10 movdqu 960( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 976 ] # asm 1: movdqu 976(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 976(<input_0=%rdi),>x5=%xmm11 movdqu 976( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 992 ] # asm 1: movdqu 992(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 992(<input_0=%rdi),>x6=%xmm12 movdqu 992( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 1008 ] # asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13 movdqu 1008( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x 
v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>x3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>x3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % 
xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<x3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<x7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % 
xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>x5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>x5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<x0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<x1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<x0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<x3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<x5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<x4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<x7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>x6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>x6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>x7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: mem128[ input_0 + 896 ] = x0 # asm 1: movdqu <x0=reg128#4,896(<input_0=int64#1) # asm 2: movdqu <x0=%xmm3,896(<input_0=%rdi) movdqu % xmm3, 896( % rdi) # qhasm: mem128[ input_0 + 912 ] = x1 # asm 1: movdqu <x1=reg128#8,912(<input_0=int64#1) # asm 2: movdqu <x1=%xmm7,912(<input_0=%rdi) movdqu % xmm7, 912( % rdi) # qhasm: mem128[ input_0 + 928 ] = x2 # asm 1: movdqu <x2=reg128#9,928(<input_0=int64#1) # asm 2: movdqu <x2=%xmm8,928(<input_0=%rdi) movdqu % xmm8, 928( % rdi) # qhasm: mem128[ input_0 + 944 ] = x3 # asm 1: movdqu <x3=reg128#1,944(<input_0=int64#1) # asm 2: movdqu <x3=%xmm0,944(<input_0=%rdi) movdqu % xmm0, 944( % rdi) # qhasm: mem128[ input_0 + 960 ] = x4 # asm 1: movdqu <x4=reg128#10,960(<input_0=int64#1) # asm 2: movdqu <x4=%xmm9,960(<input_0=%rdi) movdqu % xmm9, 960( % rdi) # qhasm: mem128[ input_0 + 976 ] = x5 # asm 1: movdqu <x5=reg128#3,976(<input_0=int64#1) # asm 2: movdqu <x5=%xmm2,976(<input_0=%rdi) movdqu % xmm2, 976( % rdi) # qhasm: mem128[ input_0 + 992 ] = x6 # asm 1: movdqu <x6=reg128#5,992(<input_0=int64#1) # asm 2: movdqu <x6=%xmm4,992(<input_0=%rdi) movdqu % xmm4, 992( % rdi) # qhasm: mem128[ input_0 + 1008 ] = x7 # asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1) # asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi) movdqu % xmm1, 1008( % rdi) # qhasm: return add % r11, % rsp ret
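# ----------------------------------------------------------------------
# Note (added annotation, not part of the generated output): the masked
# shift/OR rounds above are the classic in-register bit-matrix transpose
# steps (cf. Hacker's Delight, 7-3). Each round pairs registers at a
# fixed distance and exchanges bit groups of width s = 4, then 2, then 1,
# using a "low" mask and its complement; judging by the shift amounts,
# mask0/mask1 would be the nibble masks 0x0F../0xF0.., mask2/mask3 the
# 2-bit masks 0x33../0xCC.., and mask4/mask5 the 1-bit masks 0x55../0xAA..
# (an assumption here -- the constants are loaded outside this excerpt).
# A minimal C sketch of one such round, under those assumptions
# (illustrative helper only, not the qhasm source):
#
#   #include <stdint.h>
#
#   /* Exchange bit groups of width s between lanes a and b.  `lo`
#    * selects the low s bits of every 2s-bit group, and the high
#    * mask is taken as its complement, as in the companion consts. */
#   static inline void bit_swap(uint64_t *a, uint64_t *b,
#                               uint64_t lo, unsigned s) {
#       uint64_t na = (*a & lo) | ((*b & lo) << s);   /* vpand/psllq/vpor */
#       uint64_t nb = ((*a & ~lo) >> s) | (*b & ~lo); /* vpand/psrlq/vpor */
#       *a = na;
#       *b = nb;
#   }
#
# e.g. bit_swap(&x0, &x4, 0x0F0F0F0F0F0F0F0FULL, 4) mirrors the
# vpand/psllq/psrlq/vpor sequence applied to the (x0, x4) pair above,
# and the s = 2 and s = 1 rounds follow the same pattern.
# ----------------------------------------------------------------------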
mktmansour/MKT-KSA-Geolocation-Security
69,549
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119/avx2/vec256_mul_asm.S
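# Note (added annotation, not part of the generated output): the routine
# below multiplies bitsliced GF(2^13) field elements for mceliece6960119.
# Each operand is 13 x 256-bit vectors (one ymm word per coefficient bit,
# 256 field elements in parallel); the body is a schoolbook AND/XOR
# product r0..r24 followed by reduction modulo the Classic McEliece field
# polynomial z^13 + z^4 + z^3 + z + 1. For example, the fold
# "r15 ^= r24; r14 ^= r24; r12 ^= r24; r11 = r24" below implements
# z^24 = z^15 + z^14 + z^12 + z^11 (mod f). A hedged C sketch of that
# reduction step over uint64-style limbs (illustrative only):
#
#   /* fold the high limbs back using f(z) = z^13 + z^4 + z^3 + z + 1 */
#   for (int i = 24; i >= 13; i--) {
#       r[i - 13 + 4] ^= r[i];
#       r[i - 13 + 3] ^= r[i];
#       r[i - 13 + 1] ^= r[i];
#       r[i - 13 + 0] ^= r[i];
#   }
#
# The pure AND/XOR form has no table lookups or data-dependent branches,
# which keeps the multiplication constant-time.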
#include "namespace.h" #define vec256_mul_asm CRYPTO_NAMESPACE(vec256_mul_asm) #define _vec256_mul_asm _CRYPTO_NAMESPACE(vec256_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_mul_asm .p2align 5 .global _vec256_mul_asm .global vec256_mul_asm _vec256_mul_asm: vec256_mul_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#3,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm2,384(<input_0=%rdi) vmovupd % ymm2, 384( % rdi) # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#2,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm1,352(<input_0=%rdi) vmovupd % ymm1, 352( % rdi) # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: 
vmovupd <r9=reg256#13,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm12,288(<input_0=%rdi) vmovupd % ymm12, 288( % rdi) # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#12,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm11,256(<input_0=%rdi) vmovupd % ymm11, 256( % rdi) # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#11,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm10,224(<input_0=%rdi) vmovupd % ymm10, 224( % rdi) # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#10,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm9,192(<input_0=%rdi) vmovupd % ymm9, 192( % rdi) # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#9,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm8,160(<input_0=%rdi) vmovupd % ymm8, 160( % rdi) # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#8,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm7,128(<input_0=%rdi) vmovupd % ymm7, 128( % rdi) # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#7,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm6,96(<input_0=%rdi) vmovupd % ymm6, 96( % rdi) # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#6,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm5,64(<input_0=%rdi) vmovupd % ymm5, 64( % rdi) # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#5,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm4,32(<input_0=%rdi) vmovupd % ymm4, 32( % rdi) # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#4,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm3,0(<input_0=%rdi) vmovupd % ymm3, 0( % rdi) # qhasm: return add % r11, % rsp ret
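The routine that ends here appears to be a bitsliced GF(2^13) vector multiplication, judging by its thirteen input limbs (a0..a12 from input_1; b0 in a register plus b1..b12 at input_2+32..384) and by the reduction pattern: the schoolbook AND/XOR sweep accumulates partial products into r0..r24, and each high limb is folded back through x^13 = x^4 + x^3 + x + 1, so that, for example, r20 lands in r11, r10, r8 and r7. Below is a minimal plain-C sketch of the same computation, assuming a bitsliced layout where one uint64_t holds one coefficient bit-plane across 64 field elements (the AVX2 code above simply processes four such 64-bit lanes per ymm register); the names are illustrative, not taken from this file.

#include <stdint.h>

#define GFBITS 13              /* GF(2^13), field polynomial x^13 + x^4 + x^3 + x + 1 */

typedef uint64_t vec;          /* one bit-plane covering 64 field elements */

/* h = f * g, bitsliced: schoolbook AND/XOR product, then polynomial reduction. */
static void vec_mul_sketch(vec h[GFBITS], const vec f[GFBITS], const vec g[GFBITS]) {
    vec buf[2 * GFBITS - 1] = {0};
    int i, j;

    /* 13 x 13 partial products: coefficient plane i+j collects f_i * g_j */
    for (i = 0; i < GFBITS; i++) {
        for (j = 0; j < GFBITS; j++) {
            buf[i + j] ^= f[i] & g[j];
        }
    }

    /* fold x^i for i >= 13 using x^13 = x^4 + x^3 + x + 1 */
    for (i = 2 * GFBITS - 2; i >= GFBITS; i--) {
        buf[i - GFBITS + 4] ^= buf[i];
        buf[i - GFBITS + 3] ^= buf[i];
        buf[i - GFBITS + 1] ^= buf[i];
        buf[i - GFBITS + 0] ^= buf[i];
    }

    for (i = 0; i < GFBITS; i++) {
        h[i] = buf[i];
    }
}

Everything is ANDs and XORs at public offsets, so the multiply is constant-time by construction. Unlike the two-pass sketch, the assembly interleaves the reduction with the schoolbook sweep: the vmovapd moves of the form r7 = r20 initialize a low limb the first time a high limb folds into it, saving a separate zeroing pass.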
mktmansour/MKT-KSA-Geolocation-Security
264,233
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119/avx2/transpose_64x256_sp_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x256_sp_asm CRYPTO_NAMESPACE(transpose_64x256_sp_asm) #define _transpose_64x256_sp_asm _CRYPTO_NAMESPACE(transpose_64x256_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 x0 # qhasm: reg256 x1 # qhasm: reg256 x2 # qhasm: reg256 x3 # qhasm: reg256 x4 # qhasm: reg256 x5 # qhasm: reg256 x6 # qhasm: reg256 x7 # qhasm: reg256 t0 # qhasm: reg256 t1 # qhasm: reg256 v00 # qhasm: reg256 v01 # qhasm: reg256 v10 # qhasm: reg256 v11 # qhasm: reg256 mask0 # qhasm: reg256 mask1 # qhasm: reg256 mask2 # qhasm: reg256 mask3 # qhasm: reg256 mask4 # qhasm: reg256 mask5 # qhasm: enter transpose_64x256_sp_asm .p2align 5 .global _transpose_64x256_sp_asm .global transpose_64x256_sp_asm _transpose_64x256_sp_asm: transpose_64x256_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem256[ MASK5_0 ] # asm 1: vmovapd MASK5_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK5_0(%rip),>mask0=%ymm0 vmovapd MASK5_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK5_1 ] # asm 1: vmovapd MASK5_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK5_1(%rip),>mask1=%ymm1 vmovapd MASK5_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK4_0 ] # asm 1: vmovapd MASK4_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK4_0(%rip),>mask2=%ymm2 vmovapd MASK4_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK4_1 ] # asm 1: vmovapd MASK4_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK4_1(%rip),>mask3=%ymm3 vmovapd MASK4_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK3_0 ] # asm 1: vmovapd MASK3_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK3_0(%rip),>mask4=%ymm4 vmovapd MASK3_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK3_1 ] # asm 1: vmovapd MASK3_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK3_1(%rip),>mask5=%ymm5 vmovapd MASK3_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 256(<input_0=%rdi),>x1=%ymm7 vmovupd 256( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 512 ] 
# asm 1: vmovupd 512(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 512(<input_0=%rdi),>x2=%ymm8 vmovupd 512( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 768 ] # asm 1: vmovupd 768(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 768(<input_0=%rdi),>x3=%ymm9 vmovupd 768( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1024(<input_0=%rdi),>x4=%ymm10 vmovupd 1024( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1280 ] # asm 1: vmovupd 1280(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1280(<input_0=%rdi),>x5=%ymm11 vmovupd 1280( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1536(<input_0=%rdi),>x6=%ymm12 vmovupd 1536( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1792(<input_0=%rdi),>x7=%ymm13 vmovupd 1792( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 
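# note: every vpand / vpsll / vpsrl / vpand / vpor / vpor group in this file is
# one butterfly step of the 64x256 bit-matrix transpose. For a row pair
# (lo, hi) and subword size s, with maskA selecting the low s-bit subwords
# and maskB the high ones:
#     v00 = lo & maskA        keep the low subwords of lo
#     v10 = hi << s           low subwords of hi move into the high slots
#     v01 = lo >> s           high subwords of lo move into the low slots
#     v11 = hi & maskB        keep the high subwords of hi
#     lo  = v00 | v10
#     hi  = v01 | v11
# In this pass s = 32 (MASK5_*, rows 4 apart: x0<->x4 .. x3<->x7), then
# s = 16 (MASK4_*, rows 2 apart), then s = 8 (MASK3_*, adjacent rows).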
# qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 256 ] = x1 # asm 1: vmovupd <x1=reg256#14,256(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,256(<input_0=%rdi) vmovupd % ymm13, 256( % rdi) # qhasm: mem256[ input_0 + 512 ] = x2 # asm 1: vmovupd <x2=reg256#15,512(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,512(<input_0=%rdi) vmovupd % ymm14, 512( % rdi) # qhasm: mem256[ input_0 + 768 ] = x3 # asm 1: vmovupd <x3=reg256#11,768(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,768(<input_0=%rdi) vmovupd % ymm10, 768( % rdi) # qhasm: mem256[ input_0 + 1024 ] = x4 # asm 1: vmovupd <x4=reg256#12,1024(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1024(<input_0=%rdi) vmovupd % ymm11, 1024( % rdi) # qhasm: mem256[ input_0 + 1280 ] = x5 # asm 1: vmovupd <x5=reg256#9,1280(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1280(<input_0=%rdi) vmovupd % ymm8, 1280( % rdi) # qhasm: mem256[ input_0 + 1536 ] = x6 # asm 1: vmovupd <x6=reg256#13,1536(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1536(<input_0=%rdi) vmovupd % ymm12, 1536( % rdi) # qhasm: mem256[ input_0 + 1792 ] = x7 # asm 1: vmovupd <x7=reg256#7,1792(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1792(<input_0=%rdi) vmovupd % ymm6, 1792( % rdi) # qhasm: x0 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6 vmovupd 32( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 544 ] # asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8 vmovupd 544( % rdi), % ymm8 # qhasm: x3 = mem256[ 
input_0 + 800 ] # asm 1: vmovupd 800(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 800(<input_0=%rdi),>x3=%ymm9 vmovupd 800( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1056 ] # asm 1: vmovupd 1056(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1056(<input_0=%rdi),>x4=%ymm10 vmovupd 1056( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1312 ] # asm 1: vmovupd 1312(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1312(<input_0=%rdi),>x5=%ymm11 vmovupd 1312( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1568(<input_0=%rdi),>x6=%ymm12 vmovupd 1568( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1824(<input_0=%rdi),>x7=%ymm13 vmovupd 1824( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: 
vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 
2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x 
v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 32 ] = x0 # asm 1: vmovupd <x0=reg256#10,32(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,32(<input_0=%rdi) vmovupd % ymm9, 32( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 544 ] = x2 # asm 1: vmovupd <x2=reg256#15,544(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,544(<input_0=%rdi) vmovupd % ymm14, 544( % rdi) # qhasm: mem256[ input_0 + 800 ] = x3 # asm 1: vmovupd <x3=reg256#11,800(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,800(<input_0=%rdi) vmovupd % ymm10, 800( % rdi) # qhasm: mem256[ input_0 + 1056 ] = x4 # asm 1: vmovupd <x4=reg256#12,1056(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1056(<input_0=%rdi) vmovupd % ymm11, 1056( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x5 # asm 1: vmovupd <x5=reg256#9,1312(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1312(<input_0=%rdi) vmovupd % ymm8, 1312( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x6 # asm 1: vmovupd <x6=reg256#13,1568(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1568(<input_0=%rdi) vmovupd % ymm12, 1568( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x7 # asm 1: vmovupd <x7=reg256#7,1824(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1824(<input_0=%rdi) vmovupd % ymm6, 1824( % rdi) # qhasm: x0 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 64(<input_0=%rdi),>x0=%ymm6 vmovupd 64( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 320(<input_0=%rdi),>x1=%ymm7 vmovupd 320( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 832 ] # asm 1: vmovupd 832(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 832(<input_0=%rdi),>x3=%ymm9 vmovupd 832( % rdi), % ymm9 # qhasm: 
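#
# Note (inferred): each pass of this routine handles one 32-byte
# column slice of eight rows spaced 256 bytes apart (offsets k,
# 256+k, ..., 1792+k from input_0). A slice is loaded into x0..x7,
# run through three exchange levels (32-, 16- and 8-bit), and stored
# back in place before the next slice at k+32 is loaded, as above for
# k = 32 and below for k = 64 onward.
#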
x4 = mem256[ input_0 + 1088 ] # asm 1: vmovupd 1088(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1088(<input_0=%rdi),>x4=%ymm10 vmovupd 1088( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1344 ] # asm 1: vmovupd 1344(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1344(<input_0=%rdi),>x5=%ymm11 vmovupd 1344( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1600(<input_0=%rdi),>x6=%ymm12 vmovupd 1600( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1856(<input_0=%rdi),>x7=%ymm13 vmovupd 1856( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 
1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 
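#
# Note (inferred): this is the 16-bit exchange level. For each pair
# (x0,x2), (x1,x3), (x4,x6), (x5,x7), the high 16 bits of every
# 32-bit lane of the first register are swapped with the low 16 bits
# of the matching lane of the second, via vpslld/vpsrld $16 and the
# complementary masks in ymm2/ymm3 (mask2/mask3).
#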
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = 
x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 64 ] = x0 # asm 1: vmovupd <x0=reg256#10,64(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,64(<input_0=%rdi) vmovupd % ymm9, 64( % rdi) # qhasm: mem256[ input_0 + 320 ] = x1 # asm 1: vmovupd <x1=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 576 ] = x2 # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi) vmovupd % ymm14, 576( % rdi) # qhasm: mem256[ input_0 + 832 ] = x3 # asm 1: vmovupd <x3=reg256#11,832(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,832(<input_0=%rdi) vmovupd % ymm10, 832( % rdi) # qhasm: mem256[ input_0 + 1088 ] = x4 # asm 1: vmovupd <x4=reg256#12,1088(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1088(<input_0=%rdi) vmovupd % ymm11, 1088( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x5 # asm 1: vmovupd <x5=reg256#9,1344(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1344(<input_0=%rdi) vmovupd % ymm8, 1344( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x6 # asm 1: vmovupd <x6=reg256#13,1600(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1600(<input_0=%rdi) vmovupd % ymm12, 1600( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x7 # asm 1: vmovupd <x7=reg256#7,1856(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1856(<input_0=%rdi) vmovupd % ymm6, 1856( % rdi) # qhasm: x0 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6 vmovupd 96( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7 vmovupd 352( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8 vmovupd 608( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 864 ] # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9 vmovupd 864( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1120 ] # asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10 vmovupd 1120( % rdi), % 
ymm10 # qhasm: x5 = mem256[ input_0 + 1376 ] # asm 1: vmovupd 1376(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1376(<input_0=%rdi),>x5=%ymm11 vmovupd 1376( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1632 ] # asm 1: vmovupd 1632(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1632(<input_0=%rdi),>x6=%ymm12 vmovupd 1632( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1888(<input_0=%rdi),>x7=%ymm13 vmovupd 1888( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 
<< 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 
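#
# Note (inferred): this is the final, 8-bit exchange level. For each
# adjacent pair (x0,x1), (x2,x3), (x4,x5), (x6,x7), the high byte of
# every 16-bit lane of the first register is swapped with the low
# byte of the matching lane of the second, via vpsllw/vpsrlw $8 and
# the complementary byte masks in ymm4/ymm5 (mask4/mask5).
#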
vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 96 ] = x0 # asm 1: vmovupd <x0=reg256#10,96(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,96(<input_0=%rdi) vmovupd % ymm9, 96( % rdi) # qhasm: mem256[ input_0 + 352 ] = x1 # asm 1: vmovupd <x1=reg256#14,352(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,352(<input_0=%rdi) vmovupd % ymm13, 352( % rdi) # qhasm: mem256[ input_0 + 608 ] = x2 # asm 1: vmovupd <x2=reg256#15,608(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,608(<input_0=%rdi) vmovupd % ymm14, 608( % rdi) # qhasm: mem256[ input_0 + 864 ] = x3 # asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi) vmovupd % ymm10, 864( % rdi) # qhasm: mem256[ input_0 + 1120 ] = x4 # asm 1: vmovupd <x4=reg256#12,1120(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1120(<input_0=%rdi) vmovupd % ymm11, 1120( % rdi) # qhasm: mem256[ input_0 + 1376 ] = x5 # asm 1: vmovupd <x5=reg256#9,1376(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1376(<input_0=%rdi) vmovupd % ymm8, 1376( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x6 # asm 1: vmovupd <x6=reg256#13,1632(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1632(<input_0=%rdi) vmovupd % ymm12, 1632( % rdi) # qhasm: mem256[ input_0 + 1888 ] = x7 # asm 1: vmovupd <x7=reg256#7,1888(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1888(<input_0=%rdi) vmovupd % ymm6, 1888( % rdi) # qhasm: x0 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 128(<input_0=%rdi),>x0=%ymm6 vmovupd 128( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 384(<input_0=%rdi),>x1=%ymm7 vmovupd 384( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 640 ] # asm 1: vmovupd 640(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 640(<input_0=%rdi),>x2=%ymm8 vmovupd 640( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 896 ] # asm 1: vmovupd 896(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 896(<input_0=%rdi),>x3=%ymm9 vmovupd 896( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1152 ] # asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10 vmovupd 1152( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1408 ] # asm 1: vmovupd 1408(<input_0=int64#1),>x5=reg256#12 # asm 2: 
vmovupd 1408(<input_0=%rdi),>x5=%ymm11 vmovupd 1408( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1664 ] # asm 1: vmovupd 1664(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1664(<input_0=%rdi),>x6=%ymm12 vmovupd 1664( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1920(<input_0=%rdi),>x7=%ymm13 vmovupd 1920( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, 
% ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 
1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor 
<v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 128 ] = x0 # asm 1: vmovupd <x0=reg256#10,128(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,128(<input_0=%rdi) vmovupd % ymm9, 128( % rdi) # qhasm: mem256[ input_0 + 384 ] = x1 # asm 1: vmovupd <x1=reg256#14,384(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,384(<input_0=%rdi) vmovupd % ymm13, 384( % rdi) # qhasm: mem256[ input_0 + 640 ] = x2 # asm 1: vmovupd <x2=reg256#15,640(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,640(<input_0=%rdi) vmovupd % ymm14, 640( % rdi) # qhasm: mem256[ input_0 + 896 ] = x3 # asm 1: vmovupd <x3=reg256#11,896(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,896(<input_0=%rdi) vmovupd % ymm10, 896( % rdi) # qhasm: mem256[ input_0 + 1152 ] = x4 # asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi) vmovupd % ymm11, 1152( % rdi) # qhasm: mem256[ input_0 + 1408 ] = x5 # asm 1: vmovupd <x5=reg256#9,1408(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1408(<input_0=%rdi) vmovupd % ymm8, 1408( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x6 # asm 1: vmovupd <x6=reg256#13,1664(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1664(<input_0=%rdi) vmovupd % ymm12, 1664( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x7 # asm 1: vmovupd <x7=reg256#7,1920(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1920(<input_0=%rdi) vmovupd % ymm6, 1920( % rdi) # qhasm: x0 = mem256[ input_0 + 160 ] # asm 1: vmovupd 160(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 160(<input_0=%rdi),>x0=%ymm6 vmovupd 160( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 416 ] # asm 1: vmovupd 416(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 416(<input_0=%rdi),>x1=%ymm7 vmovupd 416( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 672 ] # asm 1: vmovupd 672(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 672(<input_0=%rdi),>x2=%ymm8 vmovupd 672( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 928 ] # asm 1: vmovupd 928(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 928(<input_0=%rdi),>x3=%ymm9 vmovupd 928( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1184 ] # asm 1: vmovupd 1184(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1184(<input_0=%rdi),>x4=%ymm10 vmovupd 1184( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1440 ] # asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11 vmovupd 1440( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1696 ] # asm 1: vmovupd 
1696(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1696(<input_0=%rdi),>x6=%ymm12 vmovupd 1696( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1952(<input_0=%rdi),>x7=%ymm13 vmovupd 1952( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq 
vpsrlq $32, % ymm9, % ymm9  # qhasm: 4x v01 = x3 unsigned>> 32
vpand % ymm13, % ymm1, % ymm13  # qhasm: v11 = x7 & mask1
vpor % ymm12, % ymm15, % ymm12  # qhasm: x3 = v00 | v10
vpor % ymm9, % ymm13, % ymm9  # qhasm: x7 = v01 | v11

vpand % ymm14, % ymm2, % ymm13  # qhasm: v00 = x0 & mask2
vpslld $16, % ymm11, % ymm15  # qhasm: 8x v10 = x2 << 16
vpsrld $16, % ymm14, % ymm14  # qhasm: 8x v01 = x0 unsigned>> 16
vpand % ymm11, % ymm3, % ymm11  # qhasm: v11 = x2 & mask3
vpor % ymm13, % ymm15, % ymm13  # qhasm: x0 = v00 | v10
vpor % ymm14, % ymm11, % ymm11  # qhasm: x2 = v01 | v11

vpand % ymm10, % ymm2, % ymm14  # qhasm: v00 = x1 & mask2
vpslld $16, % ymm12, % ymm15  # qhasm: 8x v10 = x3 << 16
vpsrld $16, % ymm10, % ymm10  # qhasm: 8x v01 = x1 unsigned>> 16
vpand % ymm12, % ymm3, % ymm12  # qhasm: v11 = x3 & mask3
vpor % ymm14, % ymm15, % ymm14  # qhasm: x1 = v00 | v10
vpor % ymm10, % ymm12, % ymm10  # qhasm: x3 = v01 | v11

vpand % ymm6, % ymm2, % ymm12  # qhasm: v00 = x4 & mask2
vpslld $16, % ymm8, % ymm15  # qhasm: 8x v10 = x6 << 16
vpsrld $16, % ymm6, % ymm6  # qhasm: 8x v01 = x4 unsigned>> 16
vpand % ymm8, % ymm3, % ymm8  # qhasm: v11 = x6 & mask3
vpor % ymm12, % ymm15, % ymm12  # qhasm: x4 = v00 | v10
vpor % ymm6, % ymm8, % ymm6  # qhasm: x6 = v01 | v11

vpand % ymm7, % ymm2, % ymm8  # qhasm: v00 = x5 & mask2
vpslld $16, % ymm9, % ymm15  # qhasm: 8x v10 = x7 << 16
vpsrld $16, % ymm7, % ymm7  # qhasm: 8x v01 = x5 unsigned>> 16
vpand % ymm9, % ymm3, % ymm9  # qhasm: v11 = x7 & mask3
vpor % ymm8, % ymm15, % ymm8  # qhasm: x5 = v00 | v10
vpor % ymm7, % ymm9, % ymm7  # qhasm: x7 = v01 | v11

vpand % ymm13, % ymm4, % ymm9  # qhasm: v00 = x0 & mask4
vpsllw $8, % ymm14, % ymm15  # qhasm: 16x v10 = x1 << 8
vpsrlw $8, % ymm13, % ymm13  # qhasm: 16x v01 = x0 unsigned>> 8
vpand % ymm14, % ymm5, % ymm14  # qhasm: v11 = x1 & mask5
vpor % ymm9, % ymm15, % ymm9  # qhasm: x0 = v00 | v10
vpor % ymm13, % ymm14, % ymm13  # qhasm: x1 = v01 | v11

vpand % ymm11, % ymm4, % ymm14  # qhasm: v00 = x2 & mask4
vpsllw $8, % ymm10, % ymm15  # qhasm: 16x v10 = x3 << 8
vpsrlw $8, % ymm11, % ymm11  # qhasm: 16x v01 = x2 unsigned>> 8
vpand % ymm10, % ymm5, % ymm10  # qhasm: v11 = x3 & mask5
vpor % ymm14, % ymm15, % ymm14  # qhasm: x2 = v00 | v10
vpor % ymm11, % ymm10, % ymm10  # qhasm: x3 = v01 | v11

vpand % ymm12, % ymm4, % ymm11  # qhasm: v00 = x4 & mask4
vpsllw $8, % ymm8, % ymm15  # qhasm: 16x v10 = x5 << 8
vpsrlw $8, % ymm12, % ymm12  # qhasm: 16x v01 = x4 unsigned>> 8
vpand % ymm8, % ymm5, % ymm8  # qhasm: v11 = x5 & mask5
vpor % ymm11, % ymm15, % ymm11  # qhasm: x4 = v00 | v10
vpor % ymm12, % ymm8, % ymm8  # qhasm: x5 = v01 | v11

vpand % ymm6, % ymm4, % ymm12  # qhasm: v00 = x6 & mask4
vpsllw $8, % ymm7, % ymm15  # qhasm: 16x v10 = x7 << 8
vpsrlw $8, % ymm6, % ymm6  # qhasm: 16x v01 = x6 unsigned>> 8
vpand % ymm7, % ymm5, % ymm7  # qhasm: v11 = x7 & mask5
vpor % ymm12, % ymm15, % ymm12  # qhasm: x6 = v00 | v10
vpor % ymm6, % ymm7, % ymm6  # qhasm: x7 = v01 | v11

vmovupd % ymm9, 160( % rdi)  # qhasm: mem256[ input_0 + 160 ] = x0
vmovupd % ymm13, 416( % rdi)  # qhasm: mem256[ input_0 + 416 ] = x1
vmovupd % ymm14, 672( % rdi)  # qhasm: mem256[ input_0 + 672 ] = x2
vmovupd % ymm10, 928( % rdi)  # qhasm: mem256[ input_0 + 928 ] = x3
vmovupd % ymm11, 1184( % rdi)  # qhasm: mem256[ input_0 + 1184 ] = x4
vmovupd % ymm8, 1440( % rdi)  # qhasm: mem256[ input_0 + 1440 ] = x5
vmovupd % ymm12, 1696( % rdi)  # qhasm: mem256[ input_0 + 1696 ] = x6
vmovupd % ymm6, 1952( % rdi)  # qhasm: mem256[ input_0 + 1952 ] = x7
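# The six-instruction vpand/vpslld/vpsrld/vpor groups above appear to perform one
# "delta swap" butterfly per register pair: for a shift s and a mask selecting the
# low s-bit groups, the pair (a, b) becomes (low groups of a and b, high groups of
# a and b). A minimal C sketch of the same exchange on one 64-bit lane (an
# illustration only; butterfly() is a hypothetical name, not part of this
# generated file):
#
#     #include <stdint.h>
#
#     /* Exchange s-bit groups between a and b; mask selects the low groups
#        (e.g. mask = 0x00000000FFFFFFFF for s = 32). */
#     static void butterfly(uint64_t *a, uint64_t *b, uint64_t mask, int s) {
#         uint64_t t0 = (*a & mask) | ((*b & mask) << s);   /* low  groups */
#         uint64_t t1 = ((*a >> s) & mask) | (*b & ~mask);  /* high groups */
#         *a = t0;
#         *b = t1;
#     }
#
# Repeating the butterfly for s = 32, 16, 8 (and, later in this file, 4, 2, 1)
# over suitably paired rows is the standard in-place bit-matrix transpose; the
# stages below apply the same s = 32/16/8 sequence to the next 256-byte column
# group (offsets 192, 448, ..., 1984).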
vmovupd 192( % rdi), % ymm6  # qhasm: x0 = mem256[ input_0 + 192 ]
vmovupd 448( % rdi), % ymm7  # qhasm: x1 = mem256[ input_0 + 448 ]
vmovupd 704( % rdi), % ymm8  # qhasm: x2 = mem256[ input_0 + 704 ]
vmovupd 960( % rdi), % ymm9  # qhasm: x3 = mem256[ input_0 + 960 ]
vmovupd 1216( % rdi), % ymm10  # qhasm: x4 = mem256[ input_0 + 1216 ]
vmovupd 1472( % rdi), % ymm11  # qhasm: x5 = mem256[ input_0 + 1472 ]
vmovupd 1728( % rdi), % ymm12  # qhasm: x6 = mem256[ input_0 + 1728 ]
vmovupd 1984( % rdi), % ymm13  # qhasm: x7 = mem256[ input_0 + 1984 ]

vpand % ymm6, % ymm0, % ymm14  # qhasm: v00 = x0 & mask0
vpsllq $32, % ymm10, % ymm15  # qhasm: 4x v10 = x4 << 32
vpsrlq $32, % ymm6, % ymm6  # qhasm: 4x v01 = x0 unsigned>> 32
vpand % ymm10, % ymm1, % ymm10  # qhasm: v11 = x4 & mask1
vpor % ymm14, % ymm15, % ymm14  # qhasm: x0 = v00 | v10
vpor % ymm6, % ymm10, % ymm6  # qhasm: x4 = v01 | v11

vpand % ymm7, % ymm0, % ymm10  # qhasm: v00 = x1 & mask0
vpsllq $32, % ymm11, % ymm15  # qhasm: 4x v10 = x5 << 32
vpsrlq $32, % ymm7, % ymm7  # qhasm: 4x v01 = x1 unsigned>> 32
vpand % ymm11, % ymm1, % ymm11  # qhasm: v11 = x5 & mask1
vpor % ymm10, % ymm15, % ymm10  # qhasm: x1 = v00 | v10
vpor % ymm7, % ymm11, % ymm7  # qhasm: x5 = v01 | v11

vpand % ymm8, % ymm0, % ymm11  # qhasm: v00 = x2 & mask0
vpsllq $32, % ymm12, % ymm15  # qhasm: 4x v10 = x6 << 32
vpsrlq $32, % ymm8, % ymm8  # qhasm: 4x v01 = x2 unsigned>> 32
vpand % ymm12, % ymm1, % ymm12  # qhasm: v11 = x6 & mask1
vpor % ymm11, % ymm15, % ymm11  # qhasm: x2 = v00 | v10
vpor % ymm8, % ymm12, % ymm8  # qhasm: x6 = v01 | v11

vpand % ymm9, % ymm0, % ymm12  # qhasm: v00 = x3 & mask0
vpsllq $32, % ymm13, % ymm15  # qhasm: 4x v10 = x7 << 32
vpsrlq $32, % ymm9, % ymm9  # qhasm: 4x v01 = x3 unsigned>> 32
vpand % ymm13, % ymm1, % ymm13  # qhasm: v11 = x7 & mask1
vpor % ymm12, % ymm15, % ymm12  # qhasm: x3 = v00 | v10
vpor % ymm9, % ymm13, % ymm9  # qhasm: x7 = v01 | v11

vpand % ymm14, % ymm2, % ymm13  # qhasm: v00 = x0 & mask2
vpslld $16, % ymm11, % ymm15  # qhasm: 8x v10 = x2 << 16
vpsrld $16, % ymm14, % ymm14  # qhasm: 8x v01 = x0 unsigned>> 16
vpand % ymm11, % ymm3, % ymm11  # qhasm: v11 = x2 & mask3
vpor % ymm13, % ymm15, % ymm13  # qhasm: x0 = v00 | v10
vpor % ymm14, % ymm11, % ymm11  # qhasm: x2 = v01 | v11

vpand % ymm10, % ymm2, % ymm14  # qhasm: v00 = x1 & mask2
vpslld $16, % ymm12, % ymm15  # qhasm: 8x v10 = x3 << 16
vpsrld $16, % ymm10, % ymm10  # qhasm: 8x v01 = x1 unsigned>> 16
vpand % ymm12, % ymm3, % ymm12  # qhasm: v11 = x3 & mask3
vpor % ymm14, % ymm15, % ymm14  # qhasm: x1 = v00 | v10
vpor % ymm10, % ymm12, % ymm10  # qhasm: x3 = v01 | v11

vpand % ymm6, % ymm2, % ymm12  # qhasm: v00 = x4 & mask2
vpslld $16, % ymm8, % ymm15  # qhasm: 8x v10 = x6 << 16
vpsrld $16, % ymm6, % ymm6  # qhasm: 8x v01 = x4 unsigned>> 16
vpand % ymm8, % ymm3, % ymm8  # qhasm: v11 = x6 & mask3
vpor % ymm12, % ymm15, % ymm12  # qhasm: x4 = v00 | v10
vpor % ymm6, % ymm8, % ymm6  # qhasm: x6 = v01 | v11

vpand % ymm7, % ymm2, % ymm8  # qhasm: v00 = x5 & mask2
vpslld $16, % ymm9, % ymm15  # qhasm: 8x v10 = x7 << 16
vpsrld $16, % ymm7, % ymm7  # qhasm: 8x v01 = x5 unsigned>> 16
vpand % ymm9, % ymm3, % ymm9  # qhasm: v11 = x7 & mask3
vpor % ymm8, % ymm15, % ymm8  # qhasm: x5 = v00 | v10
vpor % ymm7, % ymm9, % ymm7  # qhasm: x7 = v01 | v11

vpand % ymm13, % ymm4, % ymm9  # qhasm: v00 = x0 & mask4
vpsllw $8, % ymm14, % ymm15  # qhasm: 16x v10 = x1 << 8
vpsrlw $8, % ymm13, % ymm13  # qhasm: 16x v01 = x0 unsigned>> 8
vpand % ymm14, % ymm5, % ymm14  # qhasm: v11 = x1 & mask5
vpor % ymm9, % ymm15, % ymm9  # qhasm: x0 = v00 | v10
vpor % ymm13, % ymm14, % ymm13  # qhasm: x1 = v01 | v11

vpand % ymm11, % ymm4, % ymm14  # qhasm: v00 = x2 & mask4
vpsllw $8, % ymm10, % ymm15  # qhasm: 16x v10 = x3 << 8
vpsrlw $8, % ymm11, % ymm11  # qhasm: 16x v01 = x2 unsigned>> 8
vpand % ymm10, % ymm5, % ymm10  # qhasm: v11 = x3 & mask5
vpor % ymm14, % ymm15, % ymm14  # qhasm: x2 = v00 | v10
vpor % ymm11, % ymm10, % ymm10  # qhasm: x3 = v01 | v11

vpand % ymm12, % ymm4, % ymm11  # qhasm: v00 = x4 & mask4
vpsllw $8, % ymm8, % ymm15  # qhasm: 16x v10 = x5 << 8
vpsrlw $8, % ymm12, % ymm12  # qhasm: 16x v01 = x4 unsigned>> 8
vpand % ymm8, % ymm5, % ymm8  # qhasm: v11 = x5 & mask5
vpor % ymm11, % ymm15, % ymm11  # qhasm: x4 = v00 | v10
vpor % ymm12, % ymm8, % ymm8  # qhasm: x5 = v01 | v11

vpand % ymm6, % ymm4, % ymm12  # qhasm: v00 = x6 & mask4
vpsllw $8, % ymm7, % ymm15  # qhasm: 16x v10 = x7 << 8
vpsrlw $8, % ymm6, % ymm6  # qhasm: 16x v01 = x6 unsigned>> 8
vpand % ymm7, % ymm5, % ymm7  # qhasm: v11 = x7 & mask5
vpor % ymm12, % ymm15, % ymm12  # qhasm: x6 = v00 | v10
vpor % ymm6, % ymm7, % ymm6  # qhasm: x7 = v01 | v11

vmovupd % ymm9, 192( % rdi)  # qhasm: mem256[ input_0 + 192 ] = x0
vmovupd % ymm13, 448( % rdi)  # qhasm: mem256[ input_0 + 448 ] = x1
vmovupd % ymm14, 704( % rdi)  # qhasm: mem256[ input_0 + 704 ] = x2
vmovupd % ymm10, 960( % rdi)  # qhasm: mem256[ input_0 + 960 ] = x3
vmovupd % ymm11, 1216( % rdi)  # qhasm: mem256[ input_0 + 1216 ] = x4
vmovupd % ymm8, 1472( % rdi)  # qhasm: mem256[ input_0 + 1472 ] = x5
vmovupd % ymm12, 1728( % rdi)  # qhasm: mem256[ input_0 + 1728 ] = x6
vmovupd % ymm6, 1984( % rdi)  # qhasm: mem256[ input_0 + 1984 ] = x7
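# Same three butterfly stages (s = 32, 16, 8) for the third and last 256-byte
# column group of this pass (offsets 224, 480, ..., 2016).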
vmovupd 224( % rdi), % ymm6  # qhasm: x0 = mem256[ input_0 + 224 ]
vmovupd 480( % rdi), % ymm7  # qhasm: x1 = mem256[ input_0 + 480 ]
vmovupd 736( % rdi), % ymm8  # qhasm: x2 = mem256[ input_0 + 736 ]
vmovupd 992( % rdi), % ymm9  # qhasm: x3 = mem256[ input_0 + 992 ]
vmovupd 1248( % rdi), % ymm10  # qhasm: x4 = mem256[ input_0 + 1248 ]
vmovupd 1504( % rdi), % ymm11  # qhasm: x5 = mem256[ input_0 + 1504 ]
vmovupd 1760( % rdi), % ymm12  # qhasm: x6 = mem256[ input_0 + 1760 ]
vmovupd 2016( % rdi), % ymm13  # qhasm: x7 = mem256[ input_0 + 2016 ]

vpand % ymm6, % ymm0, % ymm14  # qhasm: v00 = x0 & mask0
vpsllq $32, % ymm10, % ymm15  # qhasm: 4x v10 = x4 << 32
vpsrlq $32, % ymm6, % ymm6  # qhasm: 4x v01 = x0 unsigned>> 32
vpand % ymm10, % ymm1, % ymm10  # qhasm: v11 = x4 & mask1
vpor % ymm14, % ymm15, % ymm14  # qhasm: x0 = v00 | v10
vpor % ymm6, % ymm10, % ymm6  # qhasm: x4 = v01 | v11

vpand % ymm7, % ymm0, % ymm10  # qhasm: v00 = x1 & mask0
vpsllq $32, % ymm11, % ymm15  # qhasm: 4x v10 = x5 << 32
vpsrlq $32, % ymm7, % ymm7  # qhasm: 4x v01 = x1 unsigned>> 32
vpand % ymm11, % ymm1, % ymm11  # qhasm: v11 = x5 & mask1
vpor % ymm10, % ymm15, % ymm10  # qhasm: x1 = v00 | v10
vpor % ymm7, % ymm11, % ymm7  # qhasm: x5 = v01 | v11

vpand % ymm8, % ymm0, % ymm11  # qhasm: v00 = x2 & mask0
vpsllq $32, % ymm12, % ymm15  # qhasm: 4x v10 = x6 << 32
vpsrlq $32, % ymm8, % ymm8  # qhasm: 4x v01 = x2 unsigned>> 32
vpand % ymm12, % ymm1, % ymm12  # qhasm: v11 = x6 & mask1
vpor % ymm11, % ymm15, % ymm11  # qhasm: x2 = v00 | v10
vpor % ymm8, % ymm12, % ymm8  # qhasm: x6 = v01 | v11

vpand % ymm9, % ymm0, % ymm0  # qhasm: v00 = x3 & mask0
vpsllq $32, % ymm13, % ymm12  # qhasm: 4x v10 = x7 << 32
vpsrlq $32, % ymm9, % ymm9  # qhasm: 4x v01 = x3 unsigned>> 32
vpand % ymm13, % ymm1, % ymm1  # qhasm: v11 = x7 & mask1
vpor % ymm0, % ymm12, % ymm0  # qhasm: x3 = v00 | v10
vpor % ymm9, % ymm1, % ymm1  # qhasm: x7 = v01 | v11

vpand % ymm14, % ymm2, % ymm9  # qhasm: v00 = x0 & mask2
vpslld $16, % ymm11, % ymm12  # qhasm: 8x v10 = x2 << 16
vpsrld $16, % ymm14, % ymm13  # qhasm: 8x v01 = x0 unsigned>> 16
vpand % ymm11, % ymm3, % ymm11  # qhasm: v11 = x2 & mask3
vpor % ymm9, % ymm12, % ymm9  # qhasm: x0 = v00 | v10
vpor % ymm13, % ymm11, % ymm11  # qhasm: x2 = v01 | v11

vpand % ymm10, % ymm2, % ymm12  # qhasm: v00 = x1 & mask2
vpslld $16, % ymm0, % ymm13  # qhasm: 8x v10 = x3 << 16
vpsrld $16, % ymm10, % ymm10  # qhasm: 8x v01 = x1 unsigned>> 16
vpand % ymm0, % ymm3, % ymm0  # qhasm: v11 = x3 & mask3
vpor % ymm12, % ymm13, % ymm12  # qhasm: x1 = v00 | v10
vpor % ymm10, % ymm0, % ymm0  # qhasm: x3 = v01 | v11

vpand % ymm6, % ymm2, % ymm10  # qhasm: v00 = x4 & mask2
vpslld $16, % ymm8, % ymm13  # qhasm: 8x v10 = x6 << 16
vpsrld $16, % ymm6, % ymm6  # qhasm: 8x v01 = x4 unsigned>> 16
vpand % ymm8, % ymm3, % ymm8  # qhasm: v11 = x6 & mask3
vpor % ymm10, % ymm13, % ymm10  # qhasm: x4 = v00 | v10
vpor % ymm6, % ymm8, % ymm6  # qhasm: x6 = v01 | v11

vpand % ymm7, % ymm2, % ymm2  # qhasm: v00 = x5 & mask2
vpslld $16, % ymm1, % ymm8  # qhasm: 8x v10 = x7 << 16
vpsrld $16, % ymm7, % ymm7  # qhasm: 8x v01 = x5 unsigned>> 16
vpand % ymm1, % ymm3, % ymm1  # qhasm: v11 = x7 & mask3
vpor % ymm2, % ymm8, % ymm2  # qhasm: x5 = v00 | v10
vpor % ymm7, % ymm1, % ymm1  # qhasm: x7 = v01 | v11

vpand % ymm9, % ymm4, % ymm3  # qhasm: v00 = x0 & mask4
vpsllw $8, % ymm12, % ymm7  # qhasm: 16x v10 = x1 << 8
vpsrlw $8, % ymm9, % ymm8  # qhasm: 16x v01 = x0 unsigned>> 8
vpand % ymm12, % ymm5, % ymm9  # qhasm: v11 = x1 & mask5
vpor % ymm3, % ymm7, % ymm3  # qhasm: x0 = v00 | v10
vpor % ymm8, % ymm9, % ymm7  # qhasm: x1 = v01 | v11

vpand % ymm11, % ymm4, % ymm8  # qhasm: v00 = x2 & mask4
vpsllw $8, % ymm0, % ymm9  # qhasm: 16x v10 = x3 << 8
vpsrlw $8, % ymm11, % ymm11  # qhasm: 16x v01 = x2 unsigned>> 8
vpand % ymm0, % ymm5, % ymm0  # qhasm: v11 = x3 & mask5
vpor % ymm8, % ymm9, % ymm8  # qhasm: x2 = v00 | v10
vpor % ymm11, % ymm0, % ymm0  # qhasm: x3 = v01 | v11

vpand % ymm10, % ymm4, % ymm9  # qhasm: v00 = x4 & mask4
vpsllw $8, % ymm2, % ymm11  # qhasm: 16x v10 = x5 << 8
vpsrlw $8, % ymm10, % ymm10  # qhasm: 16x v01 = x4 unsigned>> 8
vpand % ymm2, % ymm5, % ymm2  # qhasm: v11 = x5 & mask5
vpor % ymm9, % ymm11, % ymm9  # qhasm: x4 = v00 | v10
vpor % ymm10, % ymm2, % ymm2  # qhasm: x5 = v01 | v11

vpand % ymm6, % ymm4, % ymm4  # qhasm: v00 = x6 & mask4
vpsllw $8, % ymm1, % ymm10  # qhasm: 16x v10 = x7 << 8
vpsrlw $8, % ymm6, % ymm6  # qhasm: 16x v01 = x6 unsigned>> 8
vpand % ymm1, % ymm5, % ymm1  # qhasm: v11 = x7 & mask5
vpor % ymm4, % ymm10, % ymm4  # qhasm: x6 = v00 | v10
vpor % ymm6, % ymm1, % ymm1  # qhasm: x7 = v01 | v11

vmovupd % ymm3, 224( % rdi)  # qhasm: mem256[ input_0 + 224 ] = x0
vmovupd % ymm7, 480( % rdi)  # qhasm: mem256[ input_0 + 480 ] = x1
vmovupd % ymm8, 736( % rdi)  # qhasm: mem256[ input_0 + 736 ] = x2
vmovupd % ymm0, 992( % rdi)  # qhasm: mem256[ input_0 + 992 ] = x3
vmovupd % ymm9, 1248( % rdi)  # qhasm: mem256[ input_0 + 1248 ] = x4
vmovupd % ymm2, 1504( % rdi)  # qhasm: mem256[ input_0 + 1504 ] = x5
vmovupd % ymm4, 1760( % rdi)  # qhasm: mem256[ input_0 + 1760 ] = x6
vmovupd % ymm1, 2016( % rdi)  # qhasm: mem256[ input_0 + 2016 ] = x7
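# The coarse (s = 32/16/8) exchanges are now done for every column group. The
# group just finished also reused ymm0-ymm4 as destinations, so all six mask
# registers are reloaded, this time with the finer nibble/2-bit/1-bit patterns
# (MASK2_*, MASK1_*, MASK0_* from consts.S), and the remaining butterfly stages
# (s = 4, 2, 1) run over eight consecutive 32-byte rows at a time.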
vmovapd MASK2_0( % rip), % ymm0  # qhasm: mask0 aligned= mem256[ MASK2_0 ]
vmovapd MASK2_1( % rip), % ymm1  # qhasm: mask1 aligned= mem256[ MASK2_1 ]
vmovapd MASK1_0( % rip), % ymm2  # qhasm: mask2 aligned= mem256[ MASK1_0 ]
vmovapd MASK1_1( % rip), % ymm3  # qhasm: mask3 aligned= mem256[ MASK1_1 ]
vmovapd MASK0_0( % rip), % ymm4  # qhasm: mask4 aligned= mem256[ MASK0_0 ]
vmovapd MASK0_1( % rip), % ymm5  # qhasm: mask5 aligned= mem256[ MASK0_1 ]

vmovupd 0( % rdi), % ymm6  # qhasm: x0 = mem256[ input_0 + 0 ]
vmovupd 32( % rdi), % ymm7  # qhasm: x1 = mem256[ input_0 + 32 ]
vmovupd 64( % rdi), % ymm8  # qhasm: x2 = mem256[ input_0 + 64 ]
vmovupd 96( % rdi), % ymm9  # qhasm: x3 = mem256[ input_0 + 96 ]
vmovupd 128( % rdi), % ymm10  # qhasm: x4 = mem256[ input_0 + 128 ]
vmovupd 160( % rdi), % ymm11  # qhasm: x5 = mem256[ input_0 + 160 ]
vmovupd 192( % rdi), % ymm12  # qhasm: x6 = mem256[ input_0 + 192 ]
vmovupd 224( % rdi), % ymm13  # qhasm: x7 = mem256[ input_0 + 224 ]

vpand % ymm6, % ymm0, % ymm14  # qhasm: v00 = x0 & mask0
vpand % ymm10, % ymm0, % ymm15  # qhasm: v10 = x4 & mask0
vpsllq $4, % ymm15, % ymm15  # qhasm: 4x v10 <<= 4
vpand % ymm6, % ymm1, % ymm6  # qhasm: v01 = x0 & mask1
vpand % ymm10, % ymm1, % ymm10  # qhasm: v11 = x4 & mask1
vpsrlq $4, % ymm6, % ymm6  # qhasm: 4x v01 unsigned>>= 4
vpor % ymm14, % ymm15, % ymm14  # qhasm: x0 = v00 | v10
vpor % ymm6, % ymm10, % ymm6  # qhasm: x4 = v01 | v11

vpand % ymm7, % ymm0, % ymm10  # qhasm: v00 = x1 & mask0
vpand % ymm11, % ymm0, % ymm15  # qhasm: v10 = x5 & mask0
vpsllq $4, % ymm15, % ymm15  # qhasm: 4x v10 <<= 4
vpand % ymm7, % ymm1, % ymm7  # qhasm: v01 = x1 & mask1
vpand % ymm11, % ymm1, % ymm11  # qhasm: v11 = x5 & mask1
vpsrlq $4, % ymm7, % ymm7  # qhasm: 4x v01 unsigned>>= 4
vpor % ymm10, % ymm15, % ymm10  # qhasm: x1 = v00 | v10
vpor % ymm7, % ymm11, % ymm7  # qhasm: x5 = v01 | v11

vpand % ymm8, % ymm0, % ymm11  # qhasm: v00 = x2 & mask0
vpand % ymm12, % ymm0, % ymm15  # qhasm: v10 = x6 & mask0
vpsllq $4, % ymm15, % ymm15  # qhasm: 4x v10 <<= 4
vpand % ymm8, % ymm1, % ymm8  # qhasm: v01 = x2 & mask1
vpand % ymm12, % ymm1, % ymm12  # qhasm: v11 = x6 & mask1
vpsrlq $4, % ymm8, % ymm8  # qhasm: 4x v01 unsigned>>= 4
vpor % ymm11, % ymm15, % ymm11  # qhasm: x2 = v00 | v10
vpor % ymm8, % ymm12, % ymm8  # qhasm: x6 = v01 | v11

vpand % ymm9, % ymm0, % ymm12  # qhasm: v00 = x3 & mask0
vpand % ymm13, % ymm0, % ymm15  # qhasm: v10 = x7 & mask0
vpsllq $4, % ymm15, % ymm15  # qhasm: 4x v10 <<= 4
vpand % ymm9, % ymm1, % ymm9  # qhasm: v01 = x3 & mask1
vpand % ymm13, % ymm1, % ymm13  # qhasm: v11 = x7 & mask1
vpsrlq $4, % ymm9, % ymm9  # qhasm: 4x v01 unsigned>>= 4
vpor % ymm12, % ymm15, % ymm12  # qhasm: x3 = v00 | v10
vpor % ymm9, % ymm13, % ymm9  # qhasm: x7 = v01 | v11
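# Nibble stage done; next comes the 2-bit stage (same pairing as the 16-bit
# stage: x0/x2, x1/x3, x4/x6, x5/x7, masks mask2/mask3, shift 2).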
vpand % ymm14, % ymm2, % ymm13  # qhasm: v00 = x0 & mask2
vpand % ymm11, % ymm2, % ymm15  # qhasm: v10 = x2 & mask2
vpsllq $2, % ymm15, % ymm15  # qhasm: 4x v10 <<= 2
vpand % ymm14, % ymm3, % ymm14  # qhasm: v01 = x0 & mask3
vpand % ymm11, % ymm3, % ymm11  # qhasm: v11 = x2 & mask3
vpsrlq $2, % ymm14, % ymm14  # qhasm: 4x v01 unsigned>>= 2
vpor % ymm13, % ymm15, % ymm13  # qhasm: x0 = v00 | v10
vpor % ymm14, % ymm11, % ymm11  # qhasm: x2 = v01 | v11

vpand % ymm10, % ymm2, % ymm14  # qhasm: v00 = x1 & mask2
vpand % ymm12, % ymm2, % ymm15  # qhasm: v10 = x3 & mask2
vpsllq $2, % ymm15, % ymm15  # qhasm: 4x v10 <<= 2
vpand % ymm10, % ymm3, % ymm10  # qhasm: v01 = x1 & mask3
vpand % ymm12, % ymm3, % ymm12  # qhasm: v11 = x3 & mask3
vpsrlq $2, % ymm10, % ymm10  # qhasm: 4x v01 unsigned>>= 2
vpor % ymm14, % ymm15, % ymm14  # qhasm: x1 = v00 | v10
vpor % ymm10, % ymm12, % ymm10  # qhasm: x3 = v01 | v11

vpand % ymm6, % ymm2, % ymm12  # qhasm: v00 = x4 & mask2
vpand % ymm8, % ymm2, % ymm15  # qhasm: v10 = x6 & mask2
vpsllq $2, % ymm15, % ymm15  # qhasm: 4x v10 <<= 2
vpand % ymm6, % ymm3, % ymm6  # qhasm: v01 = x4 & mask3
vpand % ymm8, % ymm3, % ymm8  # qhasm: v11 = x6 & mask3
vpsrlq $2, % ymm6, % ymm6  # qhasm: 4x v01 unsigned>>= 2
vpor % ymm12, % ymm15, % ymm12  # qhasm: x4 = v00 | v10
vpor % ymm6, % ymm8, % ymm6  # qhasm: x6 = v01 | v11

vpand % ymm7, % ymm2, % ymm8  # qhasm: v00 = x5 & mask2
vpand % ymm9, % ymm2, % ymm15  # qhasm: v10 = x7 & mask2
vpsllq $2, % ymm15, % ymm15  # qhasm: 4x v10 <<= 2
vpand % ymm7, % ymm3, % ymm7  # qhasm: v01 = x5 & mask3
vpand % ymm9, % ymm3, % ymm9  # qhasm: v11 = x7 & mask3
vpsrlq $2, % ymm7, % ymm7  # qhasm: 4x v01 unsigned>>= 2
vpor % ymm8, % ymm15, % ymm8  # qhasm: x5 = v00 | v10
vpor % ymm7, % ymm9, % ymm7  # qhasm: x7 = v01 | v11
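# Final 1-bit stage for this row block (pairs x0/x1, x2/x3, x4/x5, x6/x7,
# masks mask4/mask5), after which the eight rows are stored back.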
vpand % ymm13, % ymm4, % ymm9  # qhasm: v00 = x0 & mask4
vpand % ymm14, % ymm4, % ymm15  # qhasm: v10 = x1 & mask4
vpsllq $1, % ymm15, % ymm15  # qhasm: 4x v10 <<= 1
vpand % ymm13, % ymm5, % ymm13  # qhasm: v01 = x0 & mask5
vpand % ymm14, % ymm5, % ymm14  # qhasm: v11 = x1 & mask5
vpsrlq $1, % ymm13, % ymm13  # qhasm: 4x v01 unsigned>>= 1
vpor % ymm9, % ymm15, % ymm9  # qhasm: x0 = v00 | v10
vpor % ymm13, % ymm14, % ymm13  # qhasm: x1 = v01 | v11

vpand % ymm11, % ymm4, % ymm14  # qhasm: v00 = x2 & mask4
vpand % ymm10, % ymm4, % ymm15  # qhasm: v10 = x3 & mask4
vpsllq $1, % ymm15, % ymm15  # qhasm: 4x v10 <<= 1
vpand % ymm11, % ymm5, % ymm11  # qhasm: v01 = x2 & mask5
vpand % ymm10, % ymm5, % ymm10  # qhasm: v11 = x3 & mask5
vpsrlq $1, % ymm11, % ymm11  # qhasm: 4x v01 unsigned>>= 1
vpor % ymm14, % ymm15, % ymm14  # qhasm: x2 = v00 | v10
vpor % ymm11, % ymm10, % ymm10  # qhasm: x3 = v01 | v11

vpand % ymm12, % ymm4, % ymm11  # qhasm: v00 = x4 & mask4
vpand % ymm8, % ymm4, % ymm15  # qhasm: v10 = x5 & mask4
vpsllq $1, % ymm15, % ymm15  # qhasm: 4x v10 <<= 1
vpand % ymm12, % ymm5, % ymm12  # qhasm: v01 = x4 & mask5
vpand % ymm8, % ymm5, % ymm8  # qhasm: v11 = x5 & mask5
vpsrlq $1, % ymm12, % ymm12  # qhasm: 4x v01 unsigned>>= 1
vpor % ymm11, % ymm15, % ymm11  # qhasm: x4 = v00 | v10
vpor % ymm12, % ymm8, % ymm8  # qhasm: x5 = v01 | v11

vpand % ymm6, % ymm4, % ymm12  # qhasm: v00 = x6 & mask4
vpand % ymm7, % ymm4, % ymm15  # qhasm: v10 = x7 & mask4
vpsllq $1, % ymm15, % ymm15  # qhasm: 4x v10 <<= 1
vpand % ymm6, % ymm5, % ymm6  # qhasm: v01 = x6 & mask5
vpand % ymm7, % ymm5, % ymm7  # qhasm: v11 = x7 & mask5
vpsrlq $1, % ymm6, % ymm6  # qhasm: 4x v01 unsigned>>= 1
vpor % ymm12, % ymm15, % ymm12  # qhasm: x6 = v00 | v10
vpor % ymm6, % ymm7, % ymm6  # qhasm: x7 = v01 | v11

vmovupd % ymm9, 0( % rdi)  # qhasm: mem256[ input_0 + 0 ] = x0
vmovupd % ymm13, 32( % rdi)  # qhasm: mem256[ input_0 + 32 ] = x1
vmovupd % ymm14, 64( % rdi)  # qhasm: mem256[ input_0 + 64 ] = x2
vmovupd % ymm10, 96( % rdi)  # qhasm: mem256[ input_0 + 96 ] = x3
vmovupd % ymm11, 128( % rdi)  # qhasm: mem256[ input_0 + 128 ] = x4
vmovupd % ymm8, 160( % rdi)  # qhasm: mem256[ input_0 + 160 ] = x5
vmovupd % ymm12, 192( % rdi)  # qhasm: mem256[ input_0 + 192 ] = x6
vmovupd % ymm6, 224( % rdi)  # qhasm: mem256[ input_0 + 224 ] = x7
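# Same s = 4/2/1 sequence for the next eight rows (offsets 256, 288, ..., 480).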
vmovupd 256( % rdi), % ymm6  # qhasm: x0 = mem256[ input_0 + 256 ]
vmovupd 288( % rdi), % ymm7  # qhasm: x1 = mem256[ input_0 + 288 ]
vmovupd 320( % rdi), % ymm8  # qhasm: x2 = mem256[ input_0 + 320 ]
vmovupd 352( % rdi), % ymm9  # qhasm: x3 = mem256[ input_0 + 352 ]
vmovupd 384( % rdi), % ymm10  # qhasm: x4 = mem256[ input_0 + 384 ]
vmovupd 416( % rdi), % ymm11  # qhasm: x5 = mem256[ input_0 + 416 ]
vmovupd 448( % rdi), % ymm12  # qhasm: x6 = mem256[ input_0 + 448 ]
vmovupd 480( % rdi), % ymm13  # qhasm: x7 = mem256[ input_0 + 480 ]

vpand % ymm6, % ymm0, % ymm14  # qhasm: v00 = x0 & mask0
vpand % ymm10, % ymm0, % ymm15  # qhasm: v10 = x4 & mask0
vpsllq $4, % ymm15, % ymm15  # qhasm: 4x v10 <<= 4
vpand % ymm6, % ymm1, % ymm6  # qhasm: v01 = x0 & mask1
vpand % ymm10, % ymm1, % ymm10  # qhasm: v11 = x4 & mask1
vpsrlq $4, % ymm6, % ymm6  # qhasm: 4x v01 unsigned>>= 4
vpor % ymm14, % ymm15, % ymm14  # qhasm: x0 = v00 | v10
vpor % ymm6, % ymm10, % ymm6  # qhasm: x4 = v01 | v11

vpand % ymm7, % ymm0, % ymm10  # qhasm: v00 = x1 & mask0
vpand % ymm11, % ymm0, % ymm15  # qhasm: v10 = x5 & mask0
vpsllq $4, % ymm15, % ymm15  # qhasm: 4x v10 <<= 4
vpand % ymm7, % ymm1, % ymm7  # qhasm: v01 = x1 & mask1
vpand % ymm11, % ymm1, % ymm11  # qhasm: v11 = x5 & mask1
vpsrlq $4, % ymm7, % ymm7  # qhasm: 4x v01 unsigned>>= 4
vpor % ymm10, % ymm15, % ymm10  # qhasm: x1 = v00 | v10
vpor % ymm7, % ymm11, % ymm7  # qhasm: x5 = v01 | v11

vpand % ymm8, % ymm0, % ymm11  # qhasm: v00 = x2 & mask0
vpand % ymm12, % ymm0, % ymm15  # qhasm: v10 = x6 & mask0
vpsllq $4, % ymm15, % ymm15  # qhasm: 4x v10 <<= 4
vpand % ymm8, % ymm1, % ymm8  # qhasm: v01 = x2 & mask1
vpand % ymm12, % ymm1, % ymm12  # qhasm: v11 = x6 & mask1
vpsrlq $4, % ymm8, % ymm8  # qhasm: 4x v01 unsigned>>= 4
vpor % ymm11, % ymm15, % ymm11  # qhasm: x2 = v00 | v10
vpor % ymm8, % ymm12, % ymm8  # qhasm: x6 = v01 | v11

vpand % ymm9, % ymm0, % ymm12  # qhasm: v00 = x3 & mask0
vpand % ymm13, % ymm0, % ymm15  # qhasm: v10 = x7 & mask0
vpsllq $4, % ymm15, % ymm15  # qhasm: 4x v10 <<= 4
vpand % ymm9, % ymm1, % ymm9  # qhasm: v01 = x3 & mask1
vpand % ymm13, % ymm1, % ymm13  # qhasm: v11 = x7 & mask1
vpsrlq $4, % ymm9, % ymm9  # qhasm: 4x v01 unsigned>>= 4
vpor % ymm12, % ymm15, % ymm12  # qhasm: x3 = v00 | v10
vpor % ymm9, % ymm13, % ymm9  # qhasm: x7 = v01 | v11

vpand % ymm14, % ymm2, % ymm13  # qhasm: v00 = x0 & mask2
vpand % ymm11, % ymm2, % ymm15  # qhasm: v10 = x2 & mask2
vpsllq $2, % ymm15, % ymm15  # qhasm: 4x v10 <<= 2
vpand % ymm14, % ymm3, % ymm14  # qhasm: v01 = x0 & mask3
vpand % ymm11, % ymm3, % ymm11  # qhasm: v11 = x2 & mask3
vpsrlq $2, % ymm14, % ymm14  # qhasm: 4x v01 unsigned>>= 2
vpor % ymm13, % ymm15, % ymm13  # qhasm: x0 = v00 | v10
vpor % ymm14, % ymm11, % ymm11  # qhasm: x2 = v01 | v11

vpand % ymm10, % ymm2, % ymm14  # qhasm: v00 = x1 & mask2
vpand % ymm12, % ymm2, % ymm15  # qhasm: v10 = x3 & mask2
vpsllq $2, % ymm15, % ymm15  # qhasm: 4x v10 <<= 2
vpand % ymm10, % ymm3, % ymm10  # qhasm: v01 = x1 & mask3
vpand % ymm12, % ymm3, % ymm12  # qhasm: v11 = x3 & mask3
vpsrlq $2, % ymm10, % ymm10  # qhasm: 4x v01 unsigned>>= 2
vpor % ymm14, % ymm15, % ymm14  # qhasm: x1 = v00 | v10
vpor % ymm10, % ymm12, % ymm10  # qhasm: x3 = v01 | v11

vpand % ymm6, % ymm2, % ymm12  # qhasm: v00 = x4 & mask2
vpand % ymm8, % ymm2, % ymm15  # qhasm: v10 = x6 & mask2
vpsllq $2, % ymm15, % ymm15  # qhasm: 4x v10 <<= 2
vpand % ymm6, % ymm3, % ymm6  # qhasm: v01 = x4 & mask3
vpand % ymm8, % ymm3, % ymm8  # qhasm: v11 = x6 & mask3
vpsrlq $2, % ymm6, % ymm6  # qhasm: 4x v01 unsigned>>= 2
vpor % ymm12, % ymm15, % ymm12  # qhasm: x4 = v00 | v10
vpor % ymm6, % ymm8, % ymm6  # qhasm: x6 = v01 | v11

vpand % ymm7, % ymm2, % ymm8  # qhasm: v00 = x5 & mask2
vpand % ymm9, % ymm2, % ymm15  # qhasm: v10 = x7 & mask2
vpsllq $2, % ymm15, % ymm15  # qhasm: 4x v10 <<= 2
vpand % ymm7, % ymm3, % ymm7  # qhasm: v01 = x5 & mask3
vpand % ymm9, % ymm3, % ymm9  # qhasm: v11 = x7 & mask3
vpsrlq $2, % ymm7, % ymm7  # qhasm: 4x v01 unsigned>>= 2
vpor % ymm8, % ymm15, % ymm8  # qhasm: x5 = v00 | v10
vpor % ymm7, % ymm9, % ymm7  # qhasm: x7 = v01 | v11

vpand % ymm13, % ymm4, % ymm9  # qhasm: v00 = x0 & mask4
vpand % ymm14, % ymm4, % ymm15  # qhasm: v10 = x1 & mask4
vpsllq $1, % ymm15, % ymm15  # qhasm: 4x v10 <<= 1
vpand % ymm13, % ymm5, % ymm13  # qhasm: v01 = x0 & mask5
vpand % ymm14, % ymm5, % ymm14  # qhasm: v11 = x1 & mask5
vpsrlq $1, % ymm13, % ymm13  # qhasm: 4x v01 unsigned>>= 1
vpor % ymm9, % ymm15, % ymm9  # qhasm: x0 = v00 | v10
vpor % ymm13, % ymm14, % ymm13  # qhasm: x1 = v01 | v11

vpand % ymm11, % ymm4, % ymm14  # qhasm: v00 = x2 & mask4
vpand % ymm10, % ymm4, % ymm15  # qhasm: v10 = x3 & mask4
vpsllq $1, % ymm15, % ymm15  # qhasm: 4x v10 <<= 1
vpand % ymm11, % ymm5, % ymm11  # qhasm: v01 = x2 & mask5
vpand % ymm10, % ymm5, % ymm10  # qhasm: v11 = x3 & mask5
vpsrlq $1, % ymm11, % ymm11  # qhasm: 4x v01 unsigned>>= 1
vpor % ymm14, % ymm15, % ymm14  # qhasm: x2 = v00 | v10
vpor % ymm11, % ymm10, % ymm10  # qhasm: x3 = v01 | v11

vpand % ymm12, % ymm4, % ymm11  # qhasm: v00 = x4 & mask4
vpand % ymm8, % ymm4, % ymm15  # qhasm: v10 = x5 & mask4
vpsllq $1, % ymm15, % ymm15  # qhasm: 4x v10 <<= 1
# qhasm: v01 = x4 & mask5 # asm 1: 
vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 256 ] = x0 # asm 1: vmovupd <x0=reg256#10,256(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,256(<input_0=%rdi) vmovupd % ymm9, 256( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 320 ] = x2 # asm 1: vmovupd <x2=reg256#15,320(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,320(<input_0=%rdi) vmovupd % ymm14, 320( % rdi) # qhasm: mem256[ input_0 + 352 ] = x3 # asm 1: vmovupd <x3=reg256#11,352(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,352(<input_0=%rdi) vmovupd % ymm10, 352( % rdi) # qhasm: mem256[ input_0 + 384 ] = x4 # asm 1: vmovupd <x4=reg256#12,384(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,384(<input_0=%rdi) vmovupd % ymm11, 384( % rdi) # qhasm: mem256[ input_0 + 416 ] = x5 # asm 1: vmovupd <x5=reg256#9,416(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,416(<input_0=%rdi) vmovupd % ymm8, 416( % rdi) # qhasm: mem256[ input_0 + 448 ] = x6 # asm 1: vmovupd <x6=reg256#13,448(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,448(<input_0=%rdi) vmovupd % ymm12, 448( % rdi) # qhasm: mem256[ input_0 + 480 ] = x7 # asm 1: vmovupd <x7=reg256#7,480(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,480(<input_0=%rdi) vmovupd % ymm6, 480( % rdi) # qhasm: x0 = mem256[ input_0 + 512 ] # asm 1: vmovupd 512(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 512(<input_0=%rdi),>x0=%ymm6 vmovupd 512( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 544 ] # asm 
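
# Each round above is one masked "butterfly" step of a bit-matrix
# transpose: for a register pair (x, y), a complementary mask pair
# (mask_lo, mask_hi) and the matching shift s (4, then 2, then 1 in the
# rounds above), the bits selected by mask_hi in x trade places with the
# bits selected by mask_lo in y. A minimal C-style sketch of one step,
# with illustrative names only (mask_lo/mask_hi stand for the
# mask0/mask1, mask2/mask3, mask4/mask5 pairs loaded earlier):
#
#     x_new = (x & mask_lo) | ((y & mask_lo) << s);
#     y_new = ((x & mask_hi) >> s) | (y & mask_hi);
#
# This is exactly the v00/v10/v01/v11 sequence of vpand, vpsllq/vpsrlq
# and vpor used throughout this routine.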
# qhasm: x0 = mem256[ input_0 + 512 ]
vmovupd 512(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 544 ]
vmovupd 544(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 576 ]
vmovupd 576(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 608 ]
vmovupd 608(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 640 ]
vmovupd 640(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 672 ]
vmovupd 672(%rdi), %ymm11

# qhasm: x6 = mem256[ input_0 + 704 ]
vmovupd 704(%rdi), %ymm12

# qhasm: x7 = mem256[ input_0 + 736 ]
vmovupd 736(%rdi), %ymm13

# qhasm: v00 = x0 & mask0
vpand %ymm6, %ymm0, %ymm14

# qhasm: v10 = x4 & mask0
vpand %ymm10, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x0 & mask1
vpand %ymm6, %ymm1, %ymm6

# qhasm: v11 = x4 & mask1
vpand %ymm10, %ymm1, %ymm10

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm6, %ymm6

# qhasm: x0 = v00 | v10
vpor %ymm14, %ymm15, %ymm14

# qhasm: x4 = v01 | v11
vpor %ymm6, %ymm10, %ymm6

# qhasm: v00 = x1 & mask0
vpand %ymm7, %ymm0, %ymm10

# qhasm: v10 = x5 & mask0
vpand %ymm11, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x1 & mask1
vpand %ymm7, %ymm1, %ymm7

# qhasm: v11 = x5 & mask1
vpand %ymm11, %ymm1, %ymm11

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm7, %ymm7

# qhasm: x1 = v00 | v10
vpor %ymm10, %ymm15, %ymm10

# qhasm: x5 = v01 | v11
vpor %ymm7, %ymm11, %ymm7

# qhasm: v00 = x2 & mask0
vpand %ymm8, %ymm0, %ymm11

# qhasm: v10 = x6 & mask0
vpand %ymm12, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x2 & mask1
vpand %ymm8, %ymm1, %ymm8

# qhasm: v11 = x6 & mask1
vpand %ymm12, %ymm1, %ymm12

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm8, %ymm8

# qhasm: x2 = v00 | v10
vpor %ymm11, %ymm15, %ymm11

# qhasm: x6 = v01 | v11
vpor %ymm8, %ymm12, %ymm8

# qhasm: v00 = x3 & mask0
vpand %ymm9, %ymm0, %ymm12

# qhasm: v10 = x7 & mask0
vpand %ymm13, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x3 & mask1
vpand %ymm9, %ymm1, %ymm9

# qhasm: v11 = x7 & mask1
vpand %ymm13, %ymm1, %ymm13

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm9, %ymm9

# qhasm: x3 = v00 | v10
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
vpor %ymm9, %ymm13, %ymm9

# qhasm: v00 = x0 & mask2
vpand %ymm14, %ymm2, %ymm13

# qhasm: v10 = x2 & mask2
vpand %ymm11, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x0 & mask3
vpand %ymm14, %ymm3, %ymm14

# qhasm: v11 = x2 & mask3
vpand %ymm11, %ymm3, %ymm11

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm14, %ymm14

# qhasm: x0 = v00 | v10
vpor %ymm13, %ymm15, %ymm13

# qhasm: x2 = v01 | v11
vpor %ymm14, %ymm11, %ymm11

# qhasm: v00 = x1 & mask2
vpand %ymm10, %ymm2, %ymm14

# qhasm: v10 = x3 & mask2
vpand %ymm12, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x1 & mask3
vpand %ymm10, %ymm3, %ymm10

# qhasm: v11 = x3 & mask3
vpand %ymm12, %ymm3, %ymm12

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm10, %ymm10

# qhasm: x1 = v00 | v10
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
vpor %ymm10, %ymm12, %ymm10

# qhasm: v00 = x4 & mask2
vpand %ymm6, %ymm2, %ymm12

# qhasm: v10 = x6 & mask2
vpand %ymm8, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x4 & mask3
vpand %ymm6, %ymm3, %ymm6

# qhasm: v11 = x6 & mask3
vpand %ymm8, %ymm3, %ymm8

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm6, %ymm6

# qhasm: x4 = v00 | v10
vpor %ymm12, %ymm15, %ymm12

# qhasm: x6 = v01 | v11
vpor %ymm6, %ymm8, %ymm6

# qhasm: v00 = x5 & mask2
vpand %ymm7, %ymm2, %ymm8

# qhasm: v10 = x7 & mask2
vpand %ymm9, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x5 & mask3
vpand %ymm7, %ymm3, %ymm7

# qhasm: v11 = x7 & mask3
vpand %ymm9, %ymm3, %ymm9

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm7, %ymm7

# qhasm: x5 = v00 | v10
vpor %ymm8, %ymm15, %ymm8

# qhasm: x7 = v01 | v11
vpor %ymm7, %ymm9, %ymm7

# qhasm: v00 = x0 & mask4
vpand %ymm13, %ymm4, %ymm9

# qhasm: v10 = x1 & mask4
vpand %ymm14, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x0 & mask5
vpand %ymm13, %ymm5, %ymm13

# qhasm: v11 = x1 & mask5
vpand %ymm14, %ymm5, %ymm14

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm13, %ymm13

# qhasm: x0 = v00 | v10
vpor %ymm9, %ymm15, %ymm9

# qhasm: x1 = v01 | v11
vpor %ymm13, %ymm14, %ymm13

# qhasm: v00 = x2 & mask4
vpand %ymm11, %ymm4, %ymm14

# qhasm: v10 = x3 & mask4
vpand %ymm10, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x2 & mask5
vpand %ymm11, %ymm5, %ymm11

# qhasm: v11 = x3 & mask5
vpand %ymm10, %ymm5, %ymm10

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm11, %ymm11

# qhasm: x2 = v00 | v10
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
vpor %ymm11, %ymm10, %ymm10

# qhasm: v00 = x4 & mask4
vpand %ymm12, %ymm4, %ymm11

# qhasm: v10 = x5 & mask4
vpand %ymm8, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x4 & mask5
vpand %ymm12, %ymm5, %ymm12

# qhasm: v11 = x5 & mask5
vpand %ymm8, %ymm5, %ymm8

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm12, %ymm12

# qhasm: x4 = v00 | v10
vpor %ymm11, %ymm15, %ymm11

# qhasm: x5 = v01 | v11
vpor %ymm12, %ymm8, %ymm8

# qhasm: v00 = x6 & mask4
vpand %ymm6, %ymm4, %ymm12

# qhasm: v10 = x7 & mask4
vpand %ymm7, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x6 & mask5
vpand %ymm6, %ymm5, %ymm6

# qhasm: v11 = x7 & mask5
vpand %ymm7, %ymm5, %ymm7

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm6, %ymm6

# qhasm: x6 = v00 | v10
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
vpor %ymm6, %ymm7, %ymm6

# qhasm: mem256[ input_0 + 512 ] = x0
vmovupd %ymm9, 512(%rdi)

# qhasm: mem256[ input_0 + 544 ] = x1
vmovupd %ymm13, 544(%rdi)

# qhasm: mem256[ input_0 + 576 ] = x2
vmovupd %ymm14, 576(%rdi)

# qhasm: mem256[ input_0 + 608 ] = x3
vmovupd %ymm10, 608(%rdi)

# qhasm: mem256[ input_0 + 640 ] = x4
vmovupd %ymm11, 640(%rdi)

# qhasm: mem256[ input_0 + 672 ] = x5
vmovupd %ymm8, 672(%rdi)

# qhasm: mem256[ input_0 + 704 ] = x6
vmovupd %ymm12, 704(%rdi)

# qhasm: mem256[ input_0 + 736 ] = x7
vmovupd %ymm6, 736(%rdi)

# qhasm: x0 = mem256[ input_0 + 768 ]
vmovupd 768(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 800 ]
vmovupd 800(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 832 ]
vmovupd 832(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 864 ]
vmovupd 864(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 896 ]
vmovupd 896(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 928 ]
vmovupd 928(%rdi), %ymm11

# qhasm: x6 = mem256[ input_0 + 960 ]
vmovupd 960(%rdi), %ymm12

# qhasm: x7 = mem256[ input_0 + 992 ]
vmovupd 992(%rdi), %ymm13

# qhasm: v00 = x0 & mask0
vpand %ymm6, %ymm0, %ymm14

# qhasm: v10 = x4 & mask0
vpand %ymm10, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x0 & mask1
vpand %ymm6, %ymm1, %ymm6

# qhasm: v11 = x4 & mask1
vpand %ymm10, %ymm1, %ymm10

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm6, %ymm6

# qhasm: x0 = v00 | v10
vpor %ymm14, %ymm15, %ymm14

# qhasm: x4 = v01 | v11
vpor %ymm6, %ymm10, %ymm6

# qhasm: v00 = x1 & mask0
vpand %ymm7, %ymm0, %ymm10

# qhasm: v10 = x5 & mask0
vpand %ymm11, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x1 & mask1
vpand %ymm7, %ymm1, %ymm7

# qhasm: v11 = x5 & mask1
vpand %ymm11, %ymm1, %ymm11

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm7, %ymm7

# qhasm: x1 = v00 | v10
vpor %ymm10, %ymm15, %ymm10

# qhasm: x5 = v01 | v11
vpor %ymm7, %ymm11, %ymm7

# qhasm: v00 = x2 & mask0
vpand %ymm8, %ymm0, %ymm11

# qhasm: v10 = x6 & mask0
vpand %ymm12, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x2 & mask1
vpand %ymm8, %ymm1, %ymm8

# qhasm: v11 = x6 & mask1
vpand %ymm12, %ymm1, %ymm12

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm8, %ymm8

# qhasm: x2 = v00 | v10
vpor %ymm11, %ymm15, %ymm11

# qhasm: x6 = v01 | v11
vpor %ymm8, %ymm12, %ymm8

# qhasm: v00 = x3 & mask0
vpand %ymm9, %ymm0, %ymm12

# qhasm: v10 = x7 & mask0
vpand %ymm13, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x3 & mask1
vpand %ymm9, %ymm1, %ymm9

# qhasm: v11 = x7 & mask1
vpand %ymm13, %ymm1, %ymm13

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm9, %ymm9

# qhasm: x3 = v00 | v10
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
vpor %ymm9, %ymm13, %ymm9

# qhasm: v00 = x0 & mask2
vpand %ymm14, %ymm2, %ymm13

# qhasm: v10 = x2 & mask2
vpand %ymm11, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x0 & mask3
vpand %ymm14, %ymm3, %ymm14

# qhasm: v11 = x2 & mask3
vpand %ymm11, %ymm3, %ymm11

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm14, %ymm14

# qhasm: x0 = v00 | v10
vpor %ymm13, %ymm15, %ymm13

# qhasm: x2 = v01 | v11
vpor %ymm14, %ymm11, %ymm11

# qhasm: v00 = x1 & mask2
vpand %ymm10, %ymm2, %ymm14

# qhasm: v10 = x3 & mask2
vpand %ymm12, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x1 & mask3
vpand %ymm10, %ymm3, %ymm10

# qhasm: v11 = x3 & mask3
vpand %ymm12, %ymm3, %ymm12

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm10, %ymm10

# qhasm: x1 = v00 | v10
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
vpor %ymm10, %ymm12, %ymm10

# qhasm: v00 = x4 & mask2
vpand %ymm6, %ymm2, %ymm12

# qhasm: v10 = x6 & mask2
vpand %ymm8, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x4 & mask3
vpand %ymm6, %ymm3, %ymm6

# qhasm: v11 = x6 & mask3
vpand %ymm8, %ymm3, %ymm8

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm6, %ymm6

# qhasm: x4 = v00 | v10
vpor %ymm12, %ymm15, %ymm12

# qhasm: x6 = v01 | v11
vpor %ymm6, %ymm8, %ymm6

# qhasm: v00 = x5 & mask2
vpand %ymm7, %ymm2, %ymm8

# qhasm: v10 = x7 & mask2
vpand %ymm9, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x5 & mask3
vpand %ymm7, %ymm3, %ymm7

# qhasm: v11 = x7 & mask3
vpand %ymm9, %ymm3, %ymm9

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm7, %ymm7

# qhasm: x5 = v00 | v10
vpor %ymm8, %ymm15, %ymm8

# qhasm: x7 = v01 | v11
vpor %ymm7, %ymm9, %ymm7

# qhasm: v00 = x0 & mask4
vpand %ymm13, %ymm4, %ymm9

# qhasm: v10 = x1 & mask4
vpand %ymm14, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x0 & mask5
vpand %ymm13, %ymm5, %ymm13

# qhasm: v11 = x1 & mask5
vpand %ymm14, %ymm5, %ymm14

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm13, %ymm13

# qhasm: x0 = v00 | v10
vpor %ymm9, %ymm15, %ymm9

# qhasm: x1 = v01 | v11
vpor %ymm13, %ymm14, %ymm13

# qhasm: v00 = x2 & mask4
vpand %ymm11, %ymm4, %ymm14

# qhasm: v10 = x3 & mask4
vpand %ymm10, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x2 & mask5
vpand %ymm11, %ymm5, %ymm11

# qhasm: v11 = x3 & mask5
vpand %ymm10, %ymm5, %ymm10

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm11, %ymm11

# qhasm: x2 = v00 | v10
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
vpor %ymm11, %ymm10, %ymm10

# qhasm: v00 = x4 & mask4
vpand %ymm12, %ymm4, %ymm11

# qhasm: v10 = x5 & mask4
vpand %ymm8, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x4 & mask5
vpand %ymm12, %ymm5, %ymm12

# qhasm: v11 = x5 & mask5
vpand %ymm8, %ymm5, %ymm8

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm12, %ymm12

# qhasm: x4 = v00 | v10
vpor %ymm11, %ymm15, %ymm11

# qhasm: x5 = v01 | v11
vpor %ymm12, %ymm8, %ymm8

# qhasm: v00 = x6 & mask4
vpand %ymm6, %ymm4, %ymm12

# qhasm: v10 = x7 & mask4
vpand %ymm7, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x6 & mask5
vpand %ymm6, %ymm5, %ymm6

# qhasm: v11 = x7 & mask5
vpand %ymm7, %ymm5, %ymm7

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm6, %ymm6

# qhasm: x6 = v00 | v10
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
vpor %ymm6, %ymm7, %ymm6

# qhasm: mem256[ input_0 + 768 ] = x0
vmovupd %ymm9, 768(%rdi)

# qhasm: mem256[ input_0 + 800 ] = x1
vmovupd %ymm13, 800(%rdi)

# qhasm: mem256[ input_0 + 832 ] = x2
vmovupd %ymm14, 832(%rdi)

# qhasm: mem256[ input_0 + 864 ] = x3
vmovupd %ymm10, 864(%rdi)

# qhasm: mem256[ input_0 + 896 ] = x4
vmovupd %ymm11, 896(%rdi)

# qhasm: mem256[ input_0 + 928 ] = x5
vmovupd %ymm8, 928(%rdi)

# qhasm: mem256[ input_0 + 960 ] = x6
vmovupd %ymm12, 960(%rdi)

# qhasm: mem256[ input_0 + 992 ] = x7
vmovupd %ymm6, 992(%rdi)
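
# The rows at input_0 + 512 .. 992 are now transposed in place. The same
# unrolled load / butterfly / store sequence repeats below for the blocks
# at offsets 1024..1248 and 1280..1504; only the memory offsets change,
# while the register allocation and the s = 4, 2, 1 rounds are identical.
# mask0..mask5 are presumed to be the complementary constant pairs
# matching those shifts; they are set up before this stretch of code.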
# qhasm: x0 = mem256[ input_0 + 1024 ]
vmovupd 1024(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 1056 ]
vmovupd 1056(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 1088 ]
vmovupd 1088(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 1120 ]
vmovupd 1120(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 1152 ]
vmovupd 1152(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 1184 ]
vmovupd 1184(%rdi), %ymm11

# qhasm: x6 = mem256[ input_0 + 1216 ]
vmovupd 1216(%rdi), %ymm12

# qhasm: x7 = mem256[ input_0 + 1248 ]
vmovupd 1248(%rdi), %ymm13

# qhasm: v00 = x0 & mask0
vpand %ymm6, %ymm0, %ymm14

# qhasm: v10 = x4 & mask0
vpand %ymm10, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x0 & mask1
vpand %ymm6, %ymm1, %ymm6

# qhasm: v11 = x4 & mask1
vpand %ymm10, %ymm1, %ymm10

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm6, %ymm6

# qhasm: x0 = v00 | v10
vpor %ymm14, %ymm15, %ymm14

# qhasm: x4 = v01 | v11
vpor %ymm6, %ymm10, %ymm6

# qhasm: v00 = x1 & mask0
vpand %ymm7, %ymm0, %ymm10

# qhasm: v10 = x5 & mask0
vpand %ymm11, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x1 & mask1
vpand %ymm7, %ymm1, %ymm7

# qhasm: v11 = x5 & mask1
vpand %ymm11, %ymm1, %ymm11

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm7, %ymm7

# qhasm: x1 = v00 | v10
vpor %ymm10, %ymm15, %ymm10

# qhasm: x5 = v01 | v11
vpor %ymm7, %ymm11, %ymm7

# qhasm: v00 = x2 & mask0
vpand %ymm8, %ymm0, %ymm11

# qhasm: v10 = x6 & mask0
vpand %ymm12, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x2 & mask1
vpand %ymm8, %ymm1, %ymm8

# qhasm: v11 = x6 & mask1
vpand %ymm12, %ymm1, %ymm12

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm8, %ymm8

# qhasm: x2 = v00 | v10
vpor %ymm11, %ymm15, %ymm11

# qhasm: x6 = v01 | v11
vpor %ymm8, %ymm12, %ymm8

# qhasm: v00 = x3 & mask0
vpand %ymm9, %ymm0, %ymm12

# qhasm: v10 = x7 & mask0
vpand %ymm13, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x3 & mask1
vpand %ymm9, %ymm1, %ymm9

# qhasm: v11 = x7 & mask1
vpand %ymm13, %ymm1, %ymm13

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm9, %ymm9

# qhasm: x3 = v00 | v10
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
vpor %ymm9, %ymm13, %ymm9

# qhasm: v00 = x0 & mask2
vpand %ymm14, %ymm2, %ymm13

# qhasm: v10 = x2 & mask2
vpand %ymm11, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x0 & mask3
vpand %ymm14, %ymm3, %ymm14

# qhasm: v11 = x2 & mask3
vpand %ymm11, %ymm3, %ymm11

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm14, %ymm14

# qhasm: x0 = v00 | v10
vpor %ymm13, %ymm15, %ymm13

# qhasm: x2 = v01 | v11
vpor %ymm14, %ymm11, %ymm11

# qhasm: v00 = x1 & mask2
vpand %ymm10, %ymm2, %ymm14

# qhasm: v10 = x3 & mask2
vpand %ymm12, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x1 & mask3
vpand %ymm10, %ymm3, %ymm10

# qhasm: v11 = x3 & mask3
vpand %ymm12, %ymm3, %ymm12

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm10, %ymm10

# qhasm: x1 = v00 | v10
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
vpor %ymm10, %ymm12, %ymm10

# qhasm: v00 = x4 & mask2
vpand %ymm6, %ymm2, %ymm12

# qhasm: v10 = x6 & mask2
vpand %ymm8, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x4 & mask3
vpand %ymm6, %ymm3, %ymm6

# qhasm: v11 = x6 & mask3
vpand %ymm8, %ymm3, %ymm8

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm6, %ymm6

# qhasm: x4 = v00 | v10
vpor %ymm12, %ymm15, %ymm12

# qhasm: x6 = v01 | v11
vpor %ymm6, %ymm8, %ymm6

# qhasm: v00 = x5 & mask2
vpand %ymm7, %ymm2, %ymm8

# qhasm: v10 = x7 & mask2
vpand %ymm9, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x5 & mask3
vpand %ymm7, %ymm3, %ymm7

# qhasm: v11 = x7 & mask3
vpand %ymm9, %ymm3, %ymm9

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2, %ymm7, %ymm7

# qhasm: x5 = v00 | v10
vpor %ymm8, %ymm15, %ymm8

# qhasm: x7 = v01 | v11
vpor %ymm7, %ymm9, %ymm7

# qhasm: v00 = x0 & mask4
vpand %ymm13, %ymm4, %ymm9

# qhasm: v10 = x1 & mask4
vpand %ymm14, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x0 & mask5
vpand %ymm13, %ymm5, %ymm13

# qhasm: v11 = x1 & mask5
vpand %ymm14, %ymm5, %ymm14

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm13, %ymm13

# qhasm: x0 = v00 | v10
vpor %ymm9, %ymm15, %ymm9

# qhasm: x1 = v01 | v11
vpor %ymm13, %ymm14, %ymm13

# qhasm: v00 = x2 & mask4
vpand %ymm11, %ymm4, %ymm14

# qhasm: v10 = x3 & mask4
vpand %ymm10, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x2 & mask5
vpand %ymm11, %ymm5, %ymm11

# qhasm: v11 = x3 & mask5
vpand %ymm10, %ymm5, %ymm10

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm11, %ymm11

# qhasm: x2 = v00 | v10
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
vpor %ymm11, %ymm10, %ymm10

# qhasm: v00 = x4 & mask4
vpand %ymm12, %ymm4, %ymm11

# qhasm: v10 = x5 & mask4
vpand %ymm8, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x4 & mask5
vpand %ymm12, %ymm5, %ymm12

# qhasm: v11 = x5 & mask5
vpand %ymm8, %ymm5, %ymm8

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm12, %ymm12

# qhasm: x4 = v00 | v10
vpor %ymm11, %ymm15, %ymm11

# qhasm: x5 = v01 | v11
vpor %ymm12, %ymm8, %ymm8

# qhasm: v00 = x6 & mask4
vpand %ymm6, %ymm4, %ymm12

# qhasm: v10 = x7 & mask4
vpand %ymm7, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x6 & mask5
vpand %ymm6, %ymm5, %ymm6

# qhasm: v11 = x7 & mask5
vpand %ymm7, %ymm5, %ymm7

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1, %ymm6, %ymm6

# qhasm: x6 = v00 | v10
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
vpor %ymm6, %ymm7, %ymm6

# qhasm: mem256[ input_0 + 1024 ] = x0
vmovupd %ymm9, 1024(%rdi)

# qhasm: mem256[ input_0 + 1056 ] = x1
vmovupd %ymm13, 1056(%rdi)

# qhasm: mem256[ input_0 + 1088 ] = x2
vmovupd %ymm14, 1088(%rdi)

# qhasm: mem256[ input_0 + 1120 ] = x3
vmovupd %ymm10, 1120(%rdi)

# qhasm: mem256[ input_0 + 1152 ] = x4
vmovupd %ymm11, 1152(%rdi)

# qhasm: mem256[ input_0 + 1184 ] = x5
vmovupd %ymm8, 1184(%rdi)

# qhasm: mem256[ input_0 + 1216 ] = x6
vmovupd %ymm12, 1216(%rdi)

# qhasm: mem256[ input_0 + 1248 ] = x7
vmovupd %ymm6, 1248(%rdi)

# qhasm: x0 = mem256[ input_0 + 1280 ]
vmovupd 1280(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 1312 ]
vmovupd 1312(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 1344 ]
vmovupd 1344(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 1376 ]
vmovupd 1376(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 1408 ]
vmovupd 1408(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 1440 ]
vmovupd 1440(%rdi), %ymm11

# qhasm: x6 = mem256[ input_0 + 1472 ]
vmovupd 1472(%rdi), %ymm12

# qhasm: x7 = mem256[ input_0 + 1504 ]
vmovupd 1504(%rdi), %ymm13

# qhasm: v00 = x0 & mask0
vpand %ymm6, %ymm0, %ymm14

# qhasm: v10 = x4 & mask0
vpand %ymm10, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x0 & mask1
vpand %ymm6, %ymm1, %ymm6

# qhasm: v11 = x4 & mask1
vpand %ymm10, %ymm1, %ymm10

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4, %ymm6, %ymm6

# qhasm: x0 = v00 | v10
vpor %ymm14, %ymm15, %ymm14

# qhasm: x4 = v01 | v11
vpor %ymm6, %ymm10, %ymm6

# qhasm: v00 = x1 & mask0
vpand %ymm7, %ymm0, %ymm10

# qhasm: v10 = x5 & mask0
vpand %ymm11, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand
<x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: 
vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % 
ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1280 ] = x0 # asm 1: vmovupd <x0=reg256#10,1280(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1280(<input_0=%rdi) vmovupd % ymm9, 1280( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x1 # asm 1: vmovupd <x1=reg256#14,1312(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1312(<input_0=%rdi) vmovupd % ymm13, 1312( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x2 # asm 1: vmovupd <x2=reg256#15,1344(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,1344(<input_0=%rdi) vmovupd % ymm14, 1344( % rdi) # qhasm: mem256[ input_0 + 1376 ] = x3 # asm 1: vmovupd <x3=reg256#11,1376(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1376(<input_0=%rdi) vmovupd % ymm10, 1376( % rdi) # qhasm: mem256[ input_0 + 1408 ] = x4 # asm 1: vmovupd 
<x4=reg256#12,1408(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1408(<input_0=%rdi) vmovupd % ymm11, 1408( % rdi) # qhasm: mem256[ input_0 + 1440 ] = x5 # asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi) vmovupd % ymm8, 1440( % rdi) # qhasm: mem256[ input_0 + 1472 ] = x6 # asm 1: vmovupd <x6=reg256#13,1472(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1472(<input_0=%rdi) vmovupd % ymm12, 1472( % rdi) # qhasm: mem256[ input_0 + 1504 ] = x7 # asm 1: vmovupd <x7=reg256#7,1504(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1504(<input_0=%rdi) vmovupd % ymm6, 1504( % rdi) # qhasm: x0 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1536(<input_0=%rdi),>x0=%ymm6 vmovupd 1536( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1568(<input_0=%rdi),>x1=%ymm7 vmovupd 1568( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1600(<input_0=%rdi),>x2=%ymm8 vmovupd 1600( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1632 ] # asm 1: vmovupd 1632(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1632(<input_0=%rdi),>x3=%ymm9 vmovupd 1632( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1664 ] # asm 1: vmovupd 1664(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1664(<input_0=%rdi),>x4=%ymm10 vmovupd 1664( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1696 ] # asm 1: vmovupd 1696(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1696(<input_0=%rdi),>x5=%ymm11 vmovupd 1696( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1728 ] # asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12 vmovupd 1728( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1760 ] # asm 1: vmovupd 1760(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1760(<input_0=%rdi),>x7=%ymm13 vmovupd 1760( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand 
% ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand 
<x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor 
<v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: 
vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1536 ] = x0 # asm 1: vmovupd <x0=reg256#10,1536(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1536(<input_0=%rdi) vmovupd % ymm9, 1536( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x1 # asm 1: vmovupd <x1=reg256#14,1568(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1568(<input_0=%rdi) vmovupd % ymm13, 1568( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x2 # asm 1: vmovupd <x2=reg256#15,1600(<input_0=int64#1) # asm 2: vmovupd 
<x2=%ymm14,1600(<input_0=%rdi) vmovupd % ymm14, 1600( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x3 # asm 1: vmovupd <x3=reg256#11,1632(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1632(<input_0=%rdi) vmovupd % ymm10, 1632( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x4 # asm 1: vmovupd <x4=reg256#12,1664(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1664(<input_0=%rdi) vmovupd % ymm11, 1664( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x5 # asm 1: vmovupd <x5=reg256#9,1696(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1696(<input_0=%rdi) vmovupd % ymm8, 1696( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6 # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi) vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1760 ] = x7 # asm 1: vmovupd <x7=reg256#7,1760(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1760(<input_0=%rdi) vmovupd % ymm6, 1760( % rdi) # qhasm: x0 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1792(<input_0=%rdi),>x0=%ymm6 vmovupd 1792( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1824(<input_0=%rdi),>x1=%ymm7 vmovupd 1824( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1856(<input_0=%rdi),>x2=%ymm8 vmovupd 1856( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1888(<input_0=%rdi),>x3=%ymm9 vmovupd 1888( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1920(<input_0=%rdi),>x4=%ymm10 vmovupd 1920( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1952(<input_0=%rdi),>x5=%ymm11 vmovupd 1952( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1984 ] # asm 1: vmovupd 1984(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1984(<input_0=%rdi),>x6=%ymm12 vmovupd 1984( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 2016 ] # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13 vmovupd 2016( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & 
mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#1 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm0 vpand % ymm13, % ymm0, % ymm0 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#1,<v10=reg256#1 # asm 2: vpsllq $4,<v10=%ymm0,<v10=%ymm0 vpsllq $4, % ymm0, % ymm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#1,>x3=reg256#1 # asm 2: vpor 
<v00=%ymm12,<v10=%ymm0,>x3=%ymm0 vpor % ymm12, % ymm0, % ymm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1 vpor % ymm9, % ymm1, % ymm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9 vpand % ymm14, % ymm2, % ymm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#13 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm12 vpand % ymm11, % ymm2, % ymm12 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#13,<v10=reg256#13 # asm 2: vpsllq $2,<v10=%ymm12,<v10=%ymm12 vpsllq $2, % ymm12, % ymm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#14 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm13 vpand % ymm14, % ymm3, % ymm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $2,<v01=%ymm13,<v01=%ymm13 vpsrlq $2, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9 vpor % ymm9, % ymm12, % ymm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11 vpor % ymm13, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12 vpand % ymm10, % ymm2, % ymm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#1,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x3=%ymm0,<mask2=%ymm2,>v10=%ymm13 vpand % ymm0, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12 vpor % ymm12, % ymm13, % ymm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0 vpor % ymm10, % ymm0, % ymm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10 vpand % ymm6, % ymm2, % ymm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm13 vpand % ymm8, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % 
ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10 vpor % ymm10, % ymm13, % ymm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#2,<mask2=reg256#3,>v10=reg256#3 # asm 2: vpand <x7=%ymm1,<mask2=%ymm2,>v10=%ymm2 vpand % ymm1, % ymm2, % ymm2 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#3,<v10=reg256#3 # asm 2: vpsllq $2,<v10=%ymm2,<v10=%ymm2 vpsllq $2, % ymm2, % ymm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#3,>x5=reg256#3 # asm 2: vpor <v00=%ymm8,<v10=%ymm2,>x5=%ymm2 vpor % ymm8, % ymm2, % ymm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1 vpor % ymm7, % ymm1, % ymm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4 # asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3 vpand % ymm9, % ymm4, % ymm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#13,<mask4=reg256#5,>v10=reg256#8 # asm 2: vpand <x1=%ymm12,<mask4=%ymm4,>v10=%ymm7 vpand % ymm12, % ymm4, % ymm7 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#8,<v10=reg256#8 # asm 2: vpsllq $1,<v10=%ymm7,<v10=%ymm7 vpsllq $1, % ymm7, % ymm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#10,<mask5=reg256#6,>v01=reg256#9 # asm 2: vpand <x0=%ymm9,<mask5=%ymm5,>v01=%ymm8 vpand % ymm9, % ymm5, % ymm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10 # asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9 vpand % ymm12, % ymm5, % ymm9 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $1,<v01=%ymm8,<v01=%ymm8 vpsrlq $1, % ymm8, % ymm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4 # asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3 vpor % ymm3, % ymm7, % ymm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8 # asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7 vpor % ymm8, % ymm9, % ymm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8 vpand % ymm11, % ymm4, % ymm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#1,<mask4=reg256#5,>v10=reg256#10 # asm 2: vpand <x3=%ymm0,<mask4=%ymm4,>v10=%ymm9 vpand % ymm0, % ymm4, % ymm9 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#10,<v10=reg256#10 # asm 2: vpsllq $1,<v10=%ymm9,<v10=%ymm9 vpsllq $1, % ymm9, % ymm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 
vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0 vpand % ymm0, % ymm5, % ymm0 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8 vpor % ymm8, % ymm9, % ymm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0 vpor % ymm11, % ymm0, % ymm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9 vpand % ymm10, % ymm4, % ymm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#3,<mask4=reg256#5,>v10=reg256#12 # asm 2: vpand <x5=%ymm2,<mask4=%ymm4,>v10=%ymm11 vpand % ymm2, % ymm4, % ymm11 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#12,<v10=reg256#12 # asm 2: vpsllq $1,<v10=%ymm11,<v10=%ymm11 vpsllq $1, % ymm11, % ymm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#11,<mask5=reg256#6,>v01=reg256#11 # asm 2: vpand <x4=%ymm10,<mask5=%ymm5,>v01=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3 # asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2 vpand % ymm2, % ymm5, % ymm2 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $1,<v01=%ymm10,<v01=%ymm10 vpsrlq $1, % ymm10, % ymm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9 vpor % ymm9, % ymm11, % ymm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3 # asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2 vpor % ymm10, % ymm2, % ymm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#11 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm10 vpand % ymm6, % ymm4, % ymm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#2,<mask4=reg256#5,>v10=reg256#5 # asm 2: vpand <x7=%ymm1,<mask4=%ymm4,>v10=%ymm4 vpand % ymm1, % ymm4, % ymm4 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#5,<v10=reg256#5 # asm 2: vpsllq $1,<v10=%ymm4,<v10=%ymm4 vpsllq $1, % ymm4, % ymm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1 vpand % ymm1, % ymm5, % ymm1 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#5,>x6=reg256#5 # asm 2: vpor <v00=%ymm10,<v10=%ymm4,>x6=%ymm4 vpor % ymm10, % ymm4, % ymm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1 vpor % ymm6, % ymm1, % ymm1 # qhasm: mem256[ input_0 + 1792 ] = x0 # asm 1: vmovupd <x0=reg256#4,1792(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm3,1792(<input_0=%rdi) vmovupd % ymm3, 1792( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x1 # asm 1: vmovupd <x1=reg256#8,1824(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm7,1824(<input_0=%rdi) vmovupd % ymm7, 1824( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x2 # asm 1: vmovupd 
<x2=reg256#9,1856(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm8,1856(<input_0=%rdi) vmovupd % ymm8, 1856( % rdi) # qhasm: mem256[ input_0 + 1888 ] = x3 # asm 1: vmovupd <x3=reg256#1,1888(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm0,1888(<input_0=%rdi) vmovupd % ymm0, 1888( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x4 # asm 1: vmovupd <x4=reg256#10,1920(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm9,1920(<input_0=%rdi) vmovupd % ymm9, 1920( % rdi) # qhasm: mem256[ input_0 + 1952 ] = x5 # asm 1: vmovupd <x5=reg256#3,1952(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm2,1952(<input_0=%rdi) vmovupd % ymm2, 1952( % rdi) # qhasm: mem256[ input_0 + 1984 ] = x6 # asm 1: vmovupd <x6=reg256#5,1984(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm4,1984(<input_0=%rdi) vmovupd % ymm4, 1984( % rdi) # qhasm: mem256[ input_0 + 2016 ] = x7 # asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi) vmovupd % ymm1, 2016( % rdi) # qhasm: return add % r11, % rsp ret
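# Note: each 8-vector block above follows the same three-stage pattern,
# a masked bit-exchange network on 64-bit lanes (vpsllq/vpsrlq), with
# the mask pairs mask0..mask5 held in %ymm0..%ymm5 (loaded before this
# excerpt). Per stage, a register pair (a, b) exchanges bit groups as
#
#     a' = (a & mask_even) | ((b & mask_even) << s)
#     b' = ((a & mask_odd) >> s) | (b & mask_odd)
#
# using s = 4 for the pairs (x0,x4)..(x3,x7), s = 2 for
# (x0,x2)..(x5,x7), and s = 1 for (x0,x1)..(x6,x7). This is the
# standard constant-time bit-matrix transpose step used in bitsliced
# code; reading it as a transpose is an inference from the shift
# pattern and register pairing rather than anything stated in the
# generated source.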
mktmansour/MKT-KSA-Geolocation-Security
76,827
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6960119/avx2/vec256_maa_asm.S
#include "namespace.h"
#define vec256_maa_asm CRYPTO_NAMESPACE(vec256_maa_asm)
#define _vec256_maa_asm _CRYPTO_NAMESPACE(vec256_maa_asm)

# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg256 a0
# qhasm: reg256 a1
# qhasm: reg256 a2
# qhasm: reg256 a3
# qhasm: reg256 a4
# qhasm: reg256 a5
# qhasm: reg256 a6
# qhasm: reg256 a7
# qhasm: reg256 a8
# qhasm: reg256 a9
# qhasm: reg256 a10
# qhasm: reg256 a11
# qhasm: reg256 a12
# qhasm: reg256 b0
# qhasm: reg256 b1
# qhasm: reg256 r0
# qhasm: reg256 r1
# qhasm: reg256 r2
# qhasm: reg256 r3
# qhasm: reg256 r4
# qhasm: reg256 r5
# qhasm: reg256 r6
# qhasm: reg256 r7
# qhasm: reg256 r8
# qhasm: reg256 r9
# qhasm: reg256 r10
# qhasm: reg256 r11
# qhasm: reg256 r12
# qhasm: reg256 r13
# qhasm: reg256 r14
# qhasm: reg256 r15
# qhasm: reg256 r16
# qhasm: reg256 r17
# qhasm: reg256 r18
# qhasm: reg256 r19
# qhasm: reg256 r20
# qhasm: reg256 r21
# qhasm: reg256 r22
# qhasm: reg256 r23
# qhasm: reg256 r24
# qhasm: reg256 r

# qhasm: enter vec256_maa_asm
.p2align 5
.global _vec256_maa_asm
.global vec256_maa_asm
_vec256_maa_asm:
vec256_maa_asm:
mov %rsp, %r11
and $31, %r11
add $0, %r11
sub %r11, %rsp

# qhasm: b0 = mem256[ input_2 + 0 ]
vmovupd 0(%rdx), %ymm0
# qhasm: a12 = mem256[ input_1 + 384 ]
vmovupd 384(%rsi), %ymm1
# qhasm: r12 = a12 & b0
vpand %ymm1, %ymm0, %ymm2
# qhasm: r13 = a12 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm1, %ymm3
# qhasm: r14 = a12 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm1, %ymm4
# qhasm: r15 = a12 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm1, %ymm5
# qhasm: r16 = a12 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm1, %ymm6
# qhasm: r17 = a12 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm1, %ymm7
# qhasm: r18 = a12 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm1, %ymm8
# qhasm: r19 = a12 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm1, %ymm9
# qhasm: r20 = a12 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm1, %ymm10
# qhasm: r21 = a12 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm1, %ymm11
# qhasm: r22 = a12 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm1, %ymm12
# qhasm: r23 = a12 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm1, %ymm13
# qhasm: r24 = a12 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm1, %ymm1
# qhasm: r15 ^= r24
vpxor %ymm1, %ymm5, %ymm5
# qhasm: r14 ^= r24
vpxor %ymm1, %ymm4, %ymm4
# qhasm: r12 ^= r24
vpxor %ymm1, %ymm2, %ymm2
# qhasm: r11 = r24
vmovapd %ymm1, %ymm1
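# Note on the reduction just performed (and repeated after every row below):
# the field is GF(2^13) with f(x) = x^13 + x^4 + x^3 + x + 1, so a high
# product limb r[j] (j >= 13) folds back into the low limbs as
#   r[j-13+4] ^= r[j],  r[j-13+3] ^= r[j],  r[j-13+1] ^= r[j],  r[j-13] = r[j]
# which is exactly the r24 -> r15/r14/r12/r11 schedule above, applied next
# to r23 and then down to r13.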
# qhasm: a11 = mem256[ input_1 + 352 ]
vmovupd 352(%rsi), %ymm14
# qhasm: r = a11 & b0
vpand %ymm14, %ymm0, %ymm15
# qhasm: r11 ^= r
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a11 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a11 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a11 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a11 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a11 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a11 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a11 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r18 ^= r
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a11 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r19 ^= r
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a11 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r20 ^= r
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a11 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r21 ^= r
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a11 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r22 ^= r
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a11 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r23 ^= r
vpxor %ymm14, %ymm13, %ymm13
# qhasm: r14 ^= r23
vpxor %ymm13, %ymm4, %ymm4
# qhasm: r13 ^= r23
vpxor %ymm13, %ymm3, %ymm3
# qhasm: r11 ^= r23
vpxor %ymm13, %ymm1, %ymm1
# qhasm: r10 = r23
vmovapd %ymm13, %ymm13

# qhasm: a10 = mem256[ input_1 + 320 ]
vmovupd 320(%rsi), %ymm14
# qhasm: r = a10 & b0
vpand %ymm14, %ymm0, %ymm15
# qhasm: r10 ^= r
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a10 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a10 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a10 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a10 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a10 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a10 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a10 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a10 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r18 ^= r
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a10 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r19 ^= r
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a10 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r20 ^= r
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a10 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r21 ^= r
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a10 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r22 ^= r
vpxor %ymm14, %ymm12, %ymm12
# qhasm: r13 ^= r22
vpxor %ymm12, %ymm3, %ymm3
# qhasm: r12 ^= r22
vpxor %ymm12, %ymm2, %ymm2
# qhasm: r10 ^= r22
vpxor %ymm12, %ymm13, %ymm13
# qhasm: r9 = r22
vmovapd %ymm12, %ymm12

# qhasm: a9 = mem256[ input_1 + 288 ]
vmovupd 288(%rsi), %ymm14
# qhasm: r = a9 & b0
vpand %ymm14, %ymm0, %ymm15
# qhasm: r9 ^= r
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a9 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a9 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a9 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a9 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a9 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a9 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a9 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a9 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a9 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r18 ^= r
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a9 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r19 ^= r
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a9 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r20 ^= r
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a9 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r21 ^= r
vpxor %ymm14, %ymm11, %ymm11
# qhasm: r12 ^= r21
vpxor %ymm11, %ymm2, %ymm2
# qhasm: r11 ^= r21
vpxor %ymm11, %ymm1, %ymm1
# qhasm: r9 ^= r21
vpxor %ymm11, %ymm12, %ymm12
# qhasm: r8 = r21
vmovapd %ymm11, %ymm11

# qhasm: a8 = mem256[ input_1 + 256 ]
vmovupd 256(%rsi), %ymm14
# qhasm: r = a8 & b0
vpand %ymm14, %ymm0, %ymm15
# qhasm: r8 ^= r
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a8 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a8 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a8 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a8 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a8 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a8 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a8 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a8 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a8 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a8 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r18 ^= r
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a8 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r19 ^= r
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a8 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r20 ^= r
vpxor %ymm14, %ymm10, %ymm10
# qhasm: r11 ^= r20
vpxor %ymm10, %ymm1, %ymm1
# qhasm: r10 ^= r20
vpxor %ymm10, %ymm13, %ymm13
# qhasm: r8 ^= r20
vpxor %ymm10, %ymm11, %ymm11
# qhasm: r7 = r20
vmovapd %ymm10, %ymm10

# qhasm: a7 = mem256[ input_1 + 224 ]
vmovupd 224(%rsi), %ymm14
# qhasm: r = a7 & b0
vpand %ymm14, %ymm0, %ymm15
# qhasm: r7 ^= r
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a7 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a7 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a7 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a7 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a7 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a7 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a7 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a7 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a7 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a7 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a7 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r18 ^= r
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a7 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r19 ^= r
vpxor %ymm14, %ymm9, %ymm9
# qhasm: r10 ^= r19
vpxor %ymm9, %ymm13, %ymm13
# qhasm: r9 ^= r19
vpxor %ymm9, %ymm12, %ymm12
# qhasm: r7 ^= r19
vpxor %ymm9, %ymm10, %ymm10
# qhasm: r6 = r19
vmovapd %ymm9, %ymm9

# qhasm: a6 = mem256[ input_1 + 192 ]
vmovupd 192(%rsi), %ymm14
# qhasm: r = a6 & b0
vpand %ymm14, %ymm0, %ymm15
# qhasm: r6 ^= r
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a6 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a6 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a6 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a6 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a6 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a6 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a6 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a6 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a6 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a6 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a6 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a6 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r18 ^= r
vpxor %ymm14, %ymm8, %ymm8
# qhasm: r9 ^= r18
vpxor %ymm8, %ymm12, %ymm12
# qhasm: r8 ^= r18
vpxor %ymm8, %ymm11, %ymm11
# qhasm: r6 ^= r18
vpxor %ymm8, %ymm9, %ymm9
# qhasm: r5 = r18
vmovapd %ymm8, %ymm8

# qhasm: a5 = mem256[ input_1 + 160 ]
vmovupd 160(%rsi), %ymm14
# qhasm: r = a5 & b0
vpand %ymm14, %ymm0, %ymm15
# qhasm: r5 ^= r
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a5 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r6 ^= r
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a5 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a5 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a5 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a5 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a5 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a5 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a5 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a5 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a5 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a5 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a5 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r17 ^= r
vpxor %ymm14, %ymm7, %ymm7
# qhasm: r8 ^= r17
vpxor %ymm7, %ymm11, %ymm11
# qhasm: r7 ^= r17
vpxor %ymm7, %ymm10, %ymm10
# qhasm: r5 ^= r17
vpxor %ymm7, %ymm8, %ymm8
# qhasm: r4 = r17
vmovapd %ymm7, %ymm7

# qhasm: a4 = mem256[ input_1 + 128 ]
vmovupd 128(%rsi), %ymm14
# qhasm: r = a4 & b0
vpand %ymm14, %ymm0, %ymm15
# qhasm: r4 ^= r
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a4 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r5 ^= r
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a4 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r6 ^= r
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a4 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a4 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a4 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a4 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a4 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a4 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a4 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a4 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a4 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a4 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r16 ^= r
vpxor %ymm14, %ymm6, %ymm6
# qhasm: r7 ^= r16
vpxor %ymm6, %ymm10, %ymm10
# qhasm: r6 ^= r16
vpxor %ymm6, %ymm9, %ymm9
# qhasm: r4 ^= r16
vpxor %ymm6, %ymm7, %ymm7
# qhasm: r3 = r16
vmovapd %ymm6, %ymm6

# qhasm: a3 = mem256[ input_1 + 96 ]
vmovupd 96(%rsi), %ymm14
# qhasm: r = a3 & b0
vpand %ymm14, %ymm0, %ymm15
# qhasm: r3 ^= r
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a3 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r4 ^= r
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a3 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r5 ^= r
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a3 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r6 ^= r
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a3 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a3 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a3 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a3 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a3 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a3 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a3 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a3 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a3 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r15 ^= r
vpxor %ymm14, %ymm5, %ymm5
# qhasm: r6 ^= r15
vpxor %ymm5, %ymm9, %ymm9
# qhasm: r5 ^= r15
vpxor %ymm5, %ymm8, %ymm8
# qhasm: r3 ^= r15
vpxor %ymm5, %ymm6, %ymm6
# qhasm: r2 = r15
vmovapd %ymm5, %ymm5

# qhasm: a2 = mem256[ input_1 + 64 ]
vmovupd 64(%rsi), %ymm14
# qhasm: r = a2 & b0
vpand %ymm14, %ymm0, %ymm15
# qhasm: r2 ^= r
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a2 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r3 ^= r
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a2 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r4 ^= r
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a2 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r5 ^= r
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a2 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r6 ^= r
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a2 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a2 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a2 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a2 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a2 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a2 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a2 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a2 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r14 ^= r
vpxor %ymm14, %ymm4, %ymm4
# qhasm: r5 ^= r14
vpxor %ymm4, %ymm8, %ymm8
# qhasm: r4 ^= r14
vpxor %ymm4, %ymm7, %ymm7
# qhasm: r2 ^= r14
vpxor %ymm4, %ymm5, %ymm5
# qhasm: r1 = r14
vmovapd %ymm4, %ymm4

# qhasm: a1 = mem256[ input_1 + 32 ]
vmovupd 32(%rsi), %ymm14
# qhasm: r = a1 & b0
vpand %ymm14, %ymm0, %ymm15
# qhasm: r1 ^= r
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a1 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r2 ^= r
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a1 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r3 ^= r
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a1 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r4 ^= r
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a1 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r5 ^= r
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a1 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r6 ^= r
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a1 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a1 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a1 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a1 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a1 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a1 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a1 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r13 ^= r
vpxor %ymm14, %ymm3, %ymm3
# qhasm: r4 ^= r13
vpxor %ymm3, %ymm7, %ymm7
# qhasm: r3 ^= r13
vpxor %ymm3, %ymm6, %ymm6
# qhasm: r1 ^= r13
vpxor %ymm3, %ymm4, %ymm4
# qhasm: r0 = r13
vmovapd %ymm3, %ymm3

# qhasm: a0 = mem256[ input_1 + 0 ]
vmovupd 0(%rsi), %ymm14
# qhasm: r = a0 & b0
vpand %ymm14, %ymm0, %ymm0
# qhasm: r0 ^= r
vpxor %ymm0, %ymm3, %ymm3
# qhasm: r = a0 & mem256[input_2 + 32]
vpand 32(%rdx), %ymm14, %ymm0
# qhasm: r1 ^= r
vpxor %ymm0, %ymm4, %ymm4
# qhasm: r = a0 & mem256[input_2 + 64]
vpand 64(%rdx), %ymm14, %ymm0
# qhasm: r2 ^= r
vpxor %ymm0, %ymm5, %ymm5
# qhasm: r = a0 & mem256[input_2 + 96]
vpand 96(%rdx), %ymm14, %ymm0
# qhasm: r3 ^= r
vpxor %ymm0, %ymm6, %ymm6
# qhasm: r = a0 & mem256[input_2 + 128]
vpand 128(%rdx), %ymm14, %ymm0
# qhasm: r4 ^= r
vpxor %ymm0, %ymm7, %ymm7
# qhasm: r = a0 & mem256[input_2 + 160]
vpand 160(%rdx), %ymm14, %ymm0
# qhasm: r5 ^= r
vpxor %ymm0, %ymm8, %ymm8
# qhasm: r = a0 & mem256[input_2 + 192]
vpand 192(%rdx), %ymm14, %ymm0
# qhasm: r6 ^= r
vpxor %ymm0, %ymm9, %ymm9
# qhasm: r = a0 & mem256[input_2 + 224]
vpand 224(%rdx), %ymm14, %ymm0
# qhasm: r7 ^= r
vpxor %ymm0, %ymm10, %ymm10
# qhasm: r = a0 & mem256[input_2 + 256]
vpand 256(%rdx), %ymm14, %ymm0
# qhasm: r8 ^= r
vpxor %ymm0, %ymm11, %ymm11
# qhasm: r = a0 & mem256[input_2 + 288]
vpand 288(%rdx), %ymm14, %ymm0
# qhasm: r9 ^= r
vpxor %ymm0, %ymm12, %ymm12
# qhasm: r = a0 & mem256[input_2 + 320]
vpand 320(%rdx), %ymm14, %ymm0
# qhasm: r10 ^= r
vpxor %ymm0, %ymm13, %ymm13
# qhasm: r = a0 & mem256[input_2 + 352]
vpand 352(%rdx), %ymm14, %ymm0
# qhasm: r11 ^= r
vpxor %ymm0, %ymm1, %ymm1
# qhasm: r = a0 & mem256[input_2 + 384]
vpand 384(%rdx), %ymm14, %ymm0
# qhasm: r12 ^= r
vpxor %ymm0, %ymm2, %ymm2

# qhasm: r12 = r12 ^ mem256[ input_0 + 384 ]
vpxor 384(%rdi), %ymm2, %ymm0
# qhasm: mem256[ input_0 + 384 ] = r12
vmovupd %ymm0, 384(%rdi)
# qhasm: r12 = r12 ^ mem256[ input_1 + 384 ]
vpxor 384(%rsi), %
ymm0, % ymm0 # qhasm: mem256[ input_1 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2) # asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi) vmovupd % ymm0, 384( % rsi) # qhasm: r11 = r11 ^ mem256[ input_0 + 352 ] # asm 1: vpxor 352(<input_0=int64#1),<r11=reg256#2,>r11=reg256#1 # asm 2: vpxor 352(<input_0=%rdi),<r11=%ymm1,>r11=%ymm0 vpxor 352( % rdi), % ymm1, % ymm0 # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm0,352(<input_0=%rdi) vmovupd % ymm0, 352( % rdi) # qhasm: r11 = r11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#1,>r11=reg256#1 # asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm0,>r11=%ymm0 vpxor 352( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2) # asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi) vmovupd % ymm0, 352( % rsi) # qhasm: r10 = r10 ^ mem256[ input_0 + 320 ] # asm 1: vpxor 320(<input_0=int64#1),<r10=reg256#14,>r10=reg256#1 # asm 2: vpxor 320(<input_0=%rdi),<r10=%ymm13,>r10=%ymm0 vpxor 320( % rdi), % ymm13, % ymm0 # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm0,320(<input_0=%rdi) vmovupd % ymm0, 320( % rdi) # qhasm: r10 = r10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#1,>r10=reg256#1 # asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm0,>r10=%ymm0 vpxor 320( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2) # asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi) vmovupd % ymm0, 320( % rsi) # qhasm: r9 = r9 ^ mem256[ input_0 + 288 ] # asm 1: vpxor 288(<input_0=int64#1),<r9=reg256#13,>r9=reg256#1 # asm 2: vpxor 288(<input_0=%rdi),<r9=%ymm12,>r9=%ymm0 vpxor 288( % rdi), % ymm12, % ymm0 # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm0,288(<input_0=%rdi) vmovupd % ymm0, 288( % rdi) # qhasm: r9 = r9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#1,>r9=reg256#1 # asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm0,>r9=%ymm0 vpxor 288( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2) # asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi) vmovupd % ymm0, 288( % rsi) # qhasm: r8 = r8 ^ mem256[ input_0 + 256 ] # asm 1: vpxor 256(<input_0=int64#1),<r8=reg256#12,>r8=reg256#1 # asm 2: vpxor 256(<input_0=%rdi),<r8=%ymm11,>r8=%ymm0 vpxor 256( % rdi), % ymm11, % ymm0 # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm0,256(<input_0=%rdi) vmovupd % ymm0, 256( % rdi) # qhasm: r8 = r8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#1,>r8=reg256#1 # asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm0,>r8=%ymm0 vpxor 256( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2) # asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi) vmovupd % ymm0, 256( % rsi) # qhasm: r7 = r7 ^ mem256[ input_0 + 224 ] # asm 1: vpxor 224(<input_0=int64#1),<r7=reg256#11,>r7=reg256#1 # asm 2: vpxor 224(<input_0=%rdi),<r7=%ymm10,>r7=%ymm0 vpxor 224( % rdi), % ymm10, % ymm0 # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm0,224(<input_0=%rdi) vmovupd % ymm0, 224( % rdi) # qhasm: r7 = r7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 
224(<input_1=int64#2),<r7=reg256#1,>r7=reg256#1 # asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm0,>r7=%ymm0 vpxor 224( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2) # asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi) vmovupd % ymm0, 224( % rsi) # qhasm: r6 = r6 ^ mem256[ input_0 + 192 ] # asm 1: vpxor 192(<input_0=int64#1),<r6=reg256#10,>r6=reg256#1 # asm 2: vpxor 192(<input_0=%rdi),<r6=%ymm9,>r6=%ymm0 vpxor 192( % rdi), % ymm9, % ymm0 # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm0,192(<input_0=%rdi) vmovupd % ymm0, 192( % rdi) # qhasm: r6 = r6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#1,>r6=reg256#1 # asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm0,>r6=%ymm0 vpxor 192( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2) # asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi) vmovupd % ymm0, 192( % rsi) # qhasm: r5 = r5 ^ mem256[ input_0 + 160 ] # asm 1: vpxor 160(<input_0=int64#1),<r5=reg256#9,>r5=reg256#1 # asm 2: vpxor 160(<input_0=%rdi),<r5=%ymm8,>r5=%ymm0 vpxor 160( % rdi), % ymm8, % ymm0 # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm0,160(<input_0=%rdi) vmovupd % ymm0, 160( % rdi) # qhasm: r5 = r5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#1,>r5=reg256#1 # asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm0,>r5=%ymm0 vpxor 160( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2) # asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi) vmovupd % ymm0, 160( % rsi) # qhasm: r4 = r4 ^ mem256[ input_0 + 128 ] # asm 1: vpxor 128(<input_0=int64#1),<r4=reg256#8,>r4=reg256#1 # asm 2: vpxor 128(<input_0=%rdi),<r4=%ymm7,>r4=%ymm0 vpxor 128( % rdi), % ymm7, % ymm0 # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm0,128(<input_0=%rdi) vmovupd % ymm0, 128( % rdi) # qhasm: r4 = r4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<r4=reg256#1,>r4=reg256#1 # asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm0,>r4=%ymm0 vpxor 128( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2) # asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi) vmovupd % ymm0, 128( % rsi) # qhasm: r3 = r3 ^ mem256[ input_0 + 96 ] # asm 1: vpxor 96(<input_0=int64#1),<r3=reg256#7,>r3=reg256#1 # asm 2: vpxor 96(<input_0=%rdi),<r3=%ymm6,>r3=%ymm0 vpxor 96( % rdi), % ymm6, % ymm0 # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm0,96(<input_0=%rdi) vmovupd % ymm0, 96( % rdi) # qhasm: r3 = r3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#1,>r3=reg256#1 # asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm0,>r3=%ymm0 vpxor 96( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2) # asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi) vmovupd % ymm0, 96( % rsi) # qhasm: r2 = r2 ^ mem256[ input_0 + 64 ] # asm 1: vpxor 64(<input_0=int64#1),<r2=reg256#6,>r2=reg256#1 # asm 2: vpxor 64(<input_0=%rdi),<r2=%ymm5,>r2=%ymm0 vpxor 64( % rdi), % ymm5, % ymm0 # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: r2 = r2 ^ mem256[ input_1 + 64 ] # asm 1: 
vpxor 64(<input_1=int64#2),<r2=reg256#1,>r2=reg256#1 # asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm0,>r2=%ymm0 vpxor 64( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2) # asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi) vmovupd % ymm0, 64( % rsi) # qhasm: r1 = r1 ^ mem256[ input_0 + 32 ] # asm 1: vpxor 32(<input_0=int64#1),<r1=reg256#5,>r1=reg256#1 # asm 2: vpxor 32(<input_0=%rdi),<r1=%ymm4,>r1=%ymm0 vpxor 32( % rdi), % ymm4, % ymm0 # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm0,32(<input_0=%rdi) vmovupd % ymm0, 32( % rdi) # qhasm: r1 = r1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#1,>r1=reg256#1 # asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm0,>r1=%ymm0 vpxor 32( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2) # asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi) vmovupd % ymm0, 32( % rsi) # qhasm: r0 = r0 ^ mem256[ input_0 + 0 ] # asm 1: vpxor 0(<input_0=int64#1),<r0=reg256#4,>r0=reg256#1 # asm 2: vpxor 0(<input_0=%rdi),<r0=%ymm3,>r0=%ymm0 vpxor 0( % rdi), % ymm3, % ymm0 # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm0,0(<input_0=%rdi) vmovupd % ymm0, 0( % rdi) # qhasm: r0 = r0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#1,>r0=reg256#1 # asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm0,>r0=%ymm0 vpxor 0( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2) # asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi) vmovupd % ymm0, 0( % rsi) # qhasm: return add % r11, % rsp ret
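The routine above is a bitsliced multiply-accumulate over GF(2^13): each operand is 13 ymm limbs (offsets 0..384 in steps of 32), vpand forms the GF(2) bit products, vpxor adds them, and the recurring fold (r5 ^= r14, r4 ^= r14, r2 ^= r14, r1 = r14, and likewise for r13) is reduction modulo x^13 + x^4 + x^3 + x + 1, inferred from that pattern. The store sequence at the end accumulates rather than overwrites: each result limb is XORed into mem[input_0 + off], and the updated value is then XORed into mem[input_1 + off] as well. A minimal C reference sketch under those assumptions, with uint64_t standing in for the 256-bit ymm lanes; the names gf13_mul_acc and GFBITS are illustrative, not taken from the source tree:

#include <stdint.h>

#define GFBITS 13               /* assumption: 13 limbs, per offsets 0..384 */

/* Bitsliced multiply-accumulate over GF(2)[x] mod x^13+x^4+x^3+x+1.
   Limb i of a[], b[], acc[] holds bit i of many field elements in
   parallel; AND is a parallel GF(2) multiply, XOR a parallel add. */
static void gf13_mul_acc(uint64_t acc[GFBITS],
                         const uint64_t a[GFBITS],
                         const uint64_t b[GFBITS])
{
    uint64_t prod[2 * GFBITS - 1] = {0};
    int i, j, k;

    /* schoolbook product: the same grid the vpand/vpxor pairs walk above */
    for (i = 0; i < GFBITS; i++)
        for (j = 0; j < GFBITS; j++)
            prod[i + j] ^= a[i] & b[j];

    /* fold the high limbs: x^(13+k) = x^(k+4) + x^(k+3) + x^(k+1) + x^k,
       matching the r5 ^= r14, r4 ^= r14, r2 ^= r14, r1 = r14 steps */
    for (k = 2 * GFBITS - 2; k >= GFBITS; k--) {
        prod[k - GFBITS + 4] ^= prod[k];
        prod[k - GFBITS + 3] ^= prod[k];
        prod[k - GFBITS + 1] ^= prod[k];
        prod[k - GFBITS]     ^= prod[k];
    }

    for (i = 0; i < GFBITS; i++)
        acc[i] ^= prod[i];      /* the asm XORs results into memory */
}

The downward fold order matters: limbs 24 down to 13 are cleared high to low, so bits that k = 24 folds into positions 15 and 14 are themselves reduced when the loop reaches them; the assembly achieves the same effect by folding each top limb as soon as its partial products are complete.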
mktmansour/MKT-KSA-Geolocation-Security
76,935
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128/avx2/vec256_ama_asm.S
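Orientation for the listing below: vec256_ama_asm follows an add-multiply-add pattern, read off its qhasm comments. For each limb i it first computes a_i = mem[input_0 + 32*i] ^ mem[input_1 + 32*i] and stores the sum back to input_0, then feeds a_i into the same 13-limb vpand/vpxor product against the operand at input_2, with the identical x^13 + x^4 + x^3 + x + 1 fold (r15 ^= r24, r14 ^= r24, r12 ^= r24, r11 = r24). A hedged sketch of that structure, reusing the gf13_mul_acc sketch above; the argument names mirror the rdi/rsi/rdx roles and are illustrative, and acc stands in for the routine's final accumulation stores:

/* sketch only; depends on GFBITS and gf13_mul_acc defined above */
static void vec256_ama_sketch(uint64_t in0[GFBITS],
                              const uint64_t in1[GFBITS],
                              const uint64_t in2[GFBITS],
                              uint64_t acc[GFBITS])
{
    int i;

    /* "add": in0 ^= in1, written back, exactly the vpxor/vmovupd
       pairs at each a_i load in the listing below */
    for (i = 0; i < GFBITS; i++)
        in0[i] ^= in1[i];

    /* "multiply(-add)": bitsliced product of the sum with in2,
       XORed into an accumulator; the real routine's final store
       targets follow the same accumulate-into-memory pattern shown
       for the previous file */
    gf13_mul_acc(acc, in0, in2);
}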
#include "namespace.h" #define vec256_ama_asm CRYPTO_NAMESPACE(vec256_ama_asm) #define _vec256_ama_asm _CRYPTO_NAMESPACE(vec256_ama_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_ama_asm .p2align 5 .global _vec256_ama_asm .global vec256_ama_asm _vec256_ama_asm: vec256_ama_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>a12=reg256#2 # asm 2: vmovupd 384(<input_0=%rdi),>a12=%ymm1 vmovupd 384( % rdi), % ymm1 # qhasm: a12 = a12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<a12=reg256#2,>a12=reg256#2 # asm 2: vpxor 384(<input_1=%rsi),<a12=%ymm1,>a12=%ymm1 vpxor 384( % rsi), % ymm1, % ymm1 # qhasm: mem256[ input_0 + 384 ] = a12 # asm 1: vmovupd <a12=reg256#2,384(<input_0=int64#1) # asm 2: vmovupd <a12=%ymm1,384(<input_0=%rdi) vmovupd % ymm1, 384( % rdi) # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # 
asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 = a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>a11=reg256#15 # asm 2: vmovupd 352(<input_0=%rdi),>a11=%ymm14 vmovupd 352( % rdi), % ymm14 # qhasm: a11 = a11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<a11=reg256#15,>a11=reg256#15 # asm 2: vpxor 352(<input_1=%rsi),<a11=%ymm14,>a11=%ymm14 vpxor 352( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 352 ] = a11 # asm 1: vmovupd <a11=reg256#15,352(<input_0=int64#1) # asm 2: vmovupd <a11=%ymm14,352(<input_0=%rdi) vmovupd % ymm14, 352( % rdi) # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % 
ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>a10=reg256#15 # asm 2: vmovupd 320(<input_0=%rdi),>a10=%ymm14 vmovupd 320( % rdi), % ymm14 # qhasm: a10 = a10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<a10=reg256#15,>a10=reg256#15 # asm 2: vpxor 320(<input_1=%rsi),<a10=%ymm14,>a10=%ymm14 vpxor 320( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 320 ] = a10 # asm 1: vmovupd <a10=reg256#15,320(<input_0=int64#1) # asm 2: vmovupd <a10=%ymm14,320(<input_0=%rdi) vmovupd % ymm14, 320( % rdi) # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>a9=reg256#15 # asm 2: vmovupd 288(<input_0=%rdi),>a9=%ymm14 vmovupd 288( % rdi), % ymm14 # qhasm: a9 = a9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<a9=reg256#15,>a9=reg256#15 # asm 2: vpxor 288(<input_1=%rsi),<a9=%ymm14,>a9=%ymm14 vpxor 288( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 288 ] = a9 # asm 1: vmovupd <a9=reg256#15,288(<input_0=int64#1) # asm 2: vmovupd <a9=%ymm14,288(<input_0=%rdi) vmovupd % ymm14, 288( % rdi) # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & 
mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>a8=reg256#15 # asm 2: vmovupd 256(<input_0=%rdi),>a8=%ymm14 vmovupd 256( % rdi), % ymm14 # qhasm: a8 = a8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<a8=reg256#15,>a8=reg256#15 # asm 2: vpxor 256(<input_1=%rsi),<a8=%ymm14,>a8=%ymm14 vpxor 256( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 256 ] = a8 # asm 1: vmovupd <a8=reg256#15,256(<input_0=int64#1) # asm 2: vmovupd <a8=%ymm14,256(<input_0=%rdi) vmovupd % ymm14, 256( % rdi) # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 
192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>a7=reg256#15 # asm 2: vmovupd 224(<input_0=%rdi),>a7=%ymm14 vmovupd 224( % rdi), % ymm14 # qhasm: a7 = a7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 224(<input_1=int64#2),<a7=reg256#15,>a7=reg256#15 # asm 2: vpxor 224(<input_1=%rsi),<a7=%ymm14,>a7=%ymm14 vpxor 224( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 224 ] = a7 # asm 1: vmovupd <a7=reg256#15,224(<input_0=int64#1) # asm 2: vmovupd <a7=%ymm14,224(<input_0=%rdi) vmovupd % ymm14, 224( % rdi) # qhasm: r = a7 & b0 # asm 1: vpand 
<a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % 
rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>a6=reg256#15 # asm 2: vmovupd 192(<input_0=%rdi),>a6=%ymm14 vmovupd 192( % rdi), % ymm14 # qhasm: a6 = a6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<a6=reg256#15,>a6=reg256#15 # asm 2: vpxor 192(<input_1=%rsi),<a6=%ymm14,>a6=%ymm14 vpxor 192( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 192 ] = a6 # asm 1: vmovupd <a6=reg256#15,192(<input_0=int64#1) # asm 2: vmovupd <a6=%ymm14,192(<input_0=%rdi) vmovupd % ymm14, 192( % rdi) # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: 
vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_0 + 160 ] # asm 1: vmovupd 
160(<input_0=int64#1),>a5=reg256#15 # asm 2: vmovupd 160(<input_0=%rdi),>a5=%ymm14 vmovupd 160( % rdi), % ymm14 # qhasm: a5 = a5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<a5=reg256#15,>a5=reg256#15 # asm 2: vpxor 160(<input_1=%rsi),<a5=%ymm14,>a5=%ymm14 vpxor 160( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 160 ] = a5 # asm 1: vmovupd <a5=reg256#15,160(<input_0=int64#1) # asm 2: vmovupd <a5=%ymm14,160(<input_0=%rdi) vmovupd % ymm14, 160( % rdi) # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = 
a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>a4=reg256#15 # asm 2: vmovupd 128(<input_0=%rdi),>a4=%ymm14 vmovupd 128( % rdi), % ymm14 # qhasm: a4 = a4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<a4=reg256#15,>a4=reg256#15 # asm 2: vpxor 128(<input_1=%rsi),<a4=%ymm14,>a4=%ymm14 vpxor 128( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 128 ] = a4 # asm 1: vmovupd <a4=reg256#15,128(<input_0=int64#1) # asm 2: vmovupd <a4=%ymm14,128(<input_0=%rdi) vmovupd % ymm14, 128( % rdi) # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % 
ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>a3=reg256#15 # asm 2: vmovupd 96(<input_0=%rdi),>a3=%ymm14 vmovupd 96( % rdi), % ymm14 # qhasm: a3 = a3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<a3=reg256#15,>a3=reg256#15 # asm 2: vpxor 96(<input_1=%rsi),<a3=%ymm14,>a3=%ymm14 vpxor 96( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 96 ] = a3 # asm 1: vmovupd <a3=reg256#15,96(<input_0=int64#1) # asm 2: vmovupd <a3=%ymm14,96(<input_0=%rdi) vmovupd % ymm14, 96( % rdi) # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor 
<r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>a2=reg256#15 # asm 2: vmovupd 64(<input_0=%rdi),>a2=%ymm14 vmovupd 64( % rdi), % ymm14 # qhasm: a2 = a2 ^ mem256[ input_1 + 64 ] # asm 1: vpxor 64(<input_1=int64#2),<a2=reg256#15,>a2=reg256#15 # asm 2: vpxor 64(<input_1=%rsi),<a2=%ymm14,>a2=%ymm14 vpxor 64( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 64 ] = a2 # asm 1: vmovupd <a2=reg256#15,64(<input_0=int64#1) # asm 2: vmovupd <a2=%ymm14,64(<input_0=%rdi) vmovupd % ymm14, 64( % rdi) # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 
vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 
384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>a1=reg256#15 # asm 2: vmovupd 32(<input_0=%rdi),>a1=%ymm14 vmovupd 32( % rdi), % ymm14 # qhasm: a1 = a1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<a1=reg256#15,>a1=reg256#15 # asm 2: vpxor 32(<input_1=%rsi),<a1=%ymm14,>a1=%ymm14 vpxor 32( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 32 ] = a1 # asm 1: vmovupd <a1=reg256#15,32(<input_0=int64#1) # asm 2: vmovupd <a1=%ymm14,32(<input_0=%rdi) vmovupd % ymm14, 32( % rdi) # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), 
% ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>a0=reg256#15 # asm 2: vmovupd 0(<input_0=%rdi),>a0=%ymm14 vmovupd 0( % rdi), % ymm14 # qhasm: a0 = a0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<a0=reg256#15,>a0=reg256#15 # asm 2: vpxor 0(<input_1=%rsi),<a0=%ymm14,>a0=%ymm14 vpxor 0( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 0 ] = a0 # asm 1: vmovupd <a0=reg256#15,0(<input_0=int64#1) # asm 2: vmovupd <a0=%ymm14,0(<input_0=%rdi) vmovupd % ymm14, 0( % rdi) # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # 
asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: 
vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: r12 = r12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#3,>r12=reg256#1 # asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm2,>r12=%ymm0 vpxor 384( % rsi), % ymm2, % ymm0 # qhasm: mem256[ input_1 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2) # asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi) vmovupd % ymm0, 384( % rsi) # qhasm: r11 = r11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#2,>r11=reg256#1 # asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm1,>r11=%ymm0 vpxor 352( % rsi), % ymm1, % ymm0 # qhasm: mem256[ input_1 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2) # asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi) vmovupd % ymm0, 352( % rsi) # qhasm: r10 = r10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#14,>r10=reg256#1 # asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm13,>r10=%ymm0 vpxor 320( % rsi), % ymm13, % ymm0 # qhasm: mem256[ input_1 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2) # asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi) vmovupd % ymm0, 320( % rsi) # qhasm: r9 = r9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#13,>r9=reg256#1 # asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm12,>r9=%ymm0 vpxor 288( % rsi), % ymm12, % ymm0 # qhasm: mem256[ input_1 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2) # asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi) vmovupd % ymm0, 288( % rsi) # qhasm: r8 = r8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#12,>r8=reg256#1 # asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm11,>r8=%ymm0 vpxor 256( % rsi), % ymm11, % ymm0 # qhasm: mem256[ input_1 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2) # asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi) vmovupd % ymm0, 256( % rsi) # qhasm: r7 = r7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 224(<input_1=int64#2),<r7=reg256#11,>r7=reg256#1 # asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm10,>r7=%ymm0 vpxor 224( % rsi), % ymm10, % ymm0 # qhasm: mem256[ input_1 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2) # asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi) vmovupd % ymm0, 224( % rsi) # qhasm: r6 = r6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#10,>r6=reg256#1 # asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm9,>r6=%ymm0 vpxor 192( % rsi), % ymm9, % ymm0 # qhasm: mem256[ input_1 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2) # asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi) vmovupd % ymm0, 192( % rsi) # qhasm: r5 = r5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#9,>r5=reg256#1 # asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm8,>r5=%ymm0 vpxor 160( % rsi), % ymm8, % ymm0 # qhasm: mem256[ input_1 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2) # asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi) vmovupd % ymm0, 160( % rsi) # qhasm: r4 = r4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 
128(<input_1=int64#2),<r4=reg256#8,>r4=reg256#1 # asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm7,>r4=%ymm0 vpxor 128( % rsi), % ymm7, % ymm0 # qhasm: mem256[ input_1 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2) # asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi) vmovupd % ymm0, 128( % rsi) # qhasm: r3 = r3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#7,>r3=reg256#1 # asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm6,>r3=%ymm0 vpxor 96( % rsi), % ymm6, % ymm0 # qhasm: mem256[ input_1 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2) # asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi) vmovupd % ymm0, 96( % rsi) # qhasm: r2 = r2 ^ mem256[ input_1 + 64 ] # asm 1: vpxor 64(<input_1=int64#2),<r2=reg256#6,>r2=reg256#1 # asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm5,>r2=%ymm0 vpxor 64( % rsi), % ymm5, % ymm0 # qhasm: mem256[ input_1 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2) # asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi) vmovupd % ymm0, 64( % rsi) # qhasm: r1 = r1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#5,>r1=reg256#1 # asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm4,>r1=%ymm0 vpxor 32( % rsi), % ymm4, % ymm0 # qhasm: mem256[ input_1 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2) # asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi) vmovupd % ymm0, 32( % rsi) # qhasm: r0 = r0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#4,>r0=reg256#1 # asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm3,>r0=%ymm0 vpxor 0( % rsi), % ymm3, % ymm0 # qhasm: mem256[ input_1 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2) # asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi) vmovupd % ymm0, 0( % rsi) # qhasm: return add % r11, % rsp ret
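
# ----------------------------------------------------------------------
# Summary of the routine above (hand-written comment; the algebraic
# reading is inferred from the code and is not stated anywhere in the
# generated qhasm output). The ymm arithmetic appears to implement a
# bitsliced multiply-accumulate over GF(2): input_0 is first updated as
# input_0 ^= input_1 (the "a_i ^= mem256[ input_1 + 32*i ]" loads that
# are stored back to input_0), the carry-less product of the updated
# input_0 with the 13 limbs at input_2 (b0 plus offsets 32..384) is
# accumulated with vpand/vpxor, and the reduced result is folded into
# input_1 by the final "r_j ^= mem256[ input_1 + 32*j ]" stores. The
# reduction of the high product limbs matches the GF(2^13) modulus
#
#     z^13 + z^4 + z^3 + z + 1
#
# i.e. for each high limb r[i], i >= 13 (sketch, C-like notation):
#
#     r[i-13+4] ^= r[i];  r[i-13+3] ^= r[i];
#     r[i-13+1] ^= r[i];  r[i-13+0] ^= r[i];
#
# which is exactly the visible pattern "r9 ^= r18; r8 ^= r18;
# r6 ^= r18; r5 = r18" and its shifted copies down to r13.
# ----------------------------------------------------------------------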
mktmansour/MKT-KSA-Geolocation-Security
262,634
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128/avx2/transpose_64x64_asm.S
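# ----------------------------------------------------------------------
# Overview of the file below (hand-written comment; the description is
# inferred from the code, not part of the generated qhasm output).
# transpose_64x64_asm appears to transpose a 64x64 bit matrix held as
# 64 consecutive 64-bit rows at input_0, using the classic masked
# shift-and-or butterfly. At width s (s = 32, 16 and 8 in the first
# pass; the finer widths presumably follow the same pattern later in
# the file), a row pair (x, y) lying s rows apart is recombined as
#
#     x' = (x & mask_lo) | (y << s)
#     y' = (x unsigned>> s) | (y & mask_hi)
#
# using the MASK*_0 / MASK*_1 constants referenced below. movddup keeps
# one 64-bit row duplicated in each xmm register, and pextrq/movq write
# the transposed rows back.
# ----------------------------------------------------------------------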
#include "namespace.h"

#define MASK0_0 CRYPTO_NAMESPACE(MASK0_0)
#define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0)
#define MASK0_1 CRYPTO_NAMESPACE(MASK0_1)
#define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1)
#define MASK1_0 CRYPTO_NAMESPACE(MASK1_0)
#define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0)
#define MASK1_1 CRYPTO_NAMESPACE(MASK1_1)
#define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1)
#define MASK2_0 CRYPTO_NAMESPACE(MASK2_0)
#define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0)
#define MASK2_1 CRYPTO_NAMESPACE(MASK2_1)
#define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1)
#define MASK3_0 CRYPTO_NAMESPACE(MASK3_0)
#define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0)
#define MASK3_1 CRYPTO_NAMESPACE(MASK3_1)
#define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1)
#define MASK4_0 CRYPTO_NAMESPACE(MASK4_0)
#define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0)
#define MASK4_1 CRYPTO_NAMESPACE(MASK4_1)
#define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1)
#define MASK5_0 CRYPTO_NAMESPACE(MASK5_0)
#define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0)
#define MASK5_1 CRYPTO_NAMESPACE(MASK5_1)
#define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1)
#define transpose_64x64_asm CRYPTO_NAMESPACE(transpose_64x64_asm)
#define _transpose_64x64_asm _CRYPTO_NAMESPACE(transpose_64x64_asm)

# qhasm: int64 input_0

# qhasm: int64 input_1

# qhasm: int64 input_2

# qhasm: int64 input_3

# qhasm: int64 input_4

# qhasm: int64 input_5

# qhasm: stack64 input_6

# qhasm: stack64 input_7

# qhasm: int64 caller_r11

# qhasm: int64 caller_r12

# qhasm: int64 caller_r13

# qhasm: int64 caller_r14

# qhasm: int64 caller_r15

# qhasm: int64 caller_rbx

# qhasm: int64 caller_rbp

# qhasm: reg128 r0

# qhasm: reg128 r1

# qhasm: reg128 r2

# qhasm: reg128 r3

# qhasm: reg128 r4

# qhasm: reg128 r5

# qhasm: reg128 r6

# qhasm: reg128 r7

# qhasm: reg128 t0

# qhasm: reg128 t1

# qhasm: reg128 v00

# qhasm: reg128 v01

# qhasm: reg128 v10

# qhasm: reg128 v11

# qhasm: int64 buf

# qhasm: reg128 mask0

# qhasm: reg128 mask1

# qhasm: reg128 mask2

# qhasm: reg128 mask3

# qhasm: reg128 mask4

# qhasm: reg128 mask5

# qhasm: enter transpose_64x64_asm
.p2align 5
.global _transpose_64x64_asm
.global transpose_64x64_asm
_transpose_64x64_asm:
transpose_64x64_asm:
mov % rsp, % r11
and $31, % r11
add $0, % r11
sub % r11, % rsp

# qhasm: mask0 aligned= mem128[ MASK5_0 ]
# asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0
movdqa MASK5_0( % rip), % xmm0

# qhasm: mask1 aligned= mem128[ MASK5_1 ]
# asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1
movdqa MASK5_1( % rip), % xmm1

# qhasm: mask2 aligned= mem128[ MASK4_0 ]
# asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2
movdqa MASK4_0( % rip), % xmm2

# qhasm: mask3 aligned= mem128[ MASK4_1 ]
# asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3
movdqa MASK4_1( % rip), % xmm3

# qhasm: mask4 aligned= mem128[ MASK3_0 ]
# asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4
movdqa MASK3_0( % rip), % xmm4

# qhasm: mask5 aligned= mem128[ MASK3_1 ]
# asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5
movdqa MASK3_1( % rip), % xmm5

# qhasm: r0 = mem64[ input_0 + 0 ] x2
# asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6
movddup 0( % rdi), % xmm6

# qhasm: r1 = mem64[ input_0 + 64 ] x2
# asm 1: movddup 64(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 64(<input_0=%rdi),>r1=%xmm7
movddup 64( % rdi), % xmm7

# qhasm: r2 = mem64[ input_0 + 128 ] x2
# asm 1: movddup
128(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 128(<input_0=%rdi),>r2=%xmm8 movddup 128( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 192(<input_0=%rdi),>r3=%xmm9 movddup 192( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 256(<input_0=%rdi),>r4=%xmm10 movddup 256( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 320(<input_0=%rdi),>r5=%xmm11 movddup 320( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 384(<input_0=%rdi),>r6=%xmm12 movddup 384( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 448(<input_0=%rdi),>r7=%xmm13 movddup 448( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # 
qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 0 ] = buf # asm 1: movq <buf=int64#2,0(<input_0=int64#1) # asm 2: movq <buf=%rsi,0(<input_0=%rdi) movq % rsi, 0( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 64 ] = buf # asm 1: movq <buf=int64#2,64(<input_0=int64#1) # asm 2: movq <buf=%rsi,64(<input_0=%rdi) movq % rsi, 64( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 128 ] = buf # asm 1: movq <buf=int64#2,128(<input_0=int64#1) # asm 2: movq <buf=%rsi,128(<input_0=%rdi) movq % rsi, 128( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 192 ] = buf # asm 1: movq <buf=int64#2,192(<input_0=int64#1) # asm 2: movq <buf=%rsi,192(<input_0=%rdi) movq % rsi, 192( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 256 ] = buf # asm 1: movq <buf=int64#2,256(<input_0=int64#1) # asm 2: movq <buf=%rsi,256(<input_0=%rdi) movq % rsi, 256( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 320 ] = buf # asm 1: movq <buf=int64#2,320(<input_0=int64#1) # asm 2: movq <buf=%rsi,320(<input_0=%rdi) movq % rsi, 320( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi 
pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 384 ] = buf # asm 1: movq <buf=int64#2,384(<input_0=int64#1) # asm 2: movq <buf=%rsi,384(<input_0=%rdi) movq % rsi, 384( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 448 ] = buf # asm 1: movq <buf=int64#2,448(<input_0=int64#1) # asm 2: movq <buf=%rsi,448(<input_0=%rdi) movq % rsi, 448( % rdi) # qhasm: r0 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 8(<input_0=%rdi),>r0=%xmm6 movddup 8( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 72 ] x2 # asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7 movddup 72( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 136(<input_0=%rdi),>r2=%xmm8 movddup 136( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 200(<input_0=%rdi),>r3=%xmm9 movddup 200( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 264(<input_0=%rdi),>r4=%xmm10 movddup 264( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 328(<input_0=%rdi),>r5=%xmm11 movddup 328( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 392(<input_0=%rdi),>r6=%xmm12 movddup 392( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 456(<input_0=%rdi),>r7=%xmm13 movddup 456( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 
= v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 
vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 8 ] = buf # asm 1: movq <buf=int64#2,8(<input_0=int64#1) # asm 2: movq <buf=%rsi,8(<input_0=%rdi) movq % rsi, 8( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 72 ] = buf # asm 1: movq <buf=int64#2,72(<input_0=int64#1) # asm 2: movq <buf=%rsi,72(<input_0=%rdi) movq % rsi, 72( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 136 ] = buf # asm 1: movq <buf=int64#2,136(<input_0=int64#1) # asm 2: movq <buf=%rsi,136(<input_0=%rdi) movq % rsi, 136( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: 
mem64[ input_0 + 200 ] = buf # asm 1: movq <buf=int64#2,200(<input_0=int64#1) # asm 2: movq <buf=%rsi,200(<input_0=%rdi) movq % rsi, 200( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 264 ] = buf # asm 1: movq <buf=int64#2,264(<input_0=int64#1) # asm 2: movq <buf=%rsi,264(<input_0=%rdi) movq % rsi, 264( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 328 ] = buf # asm 1: movq <buf=int64#2,328(<input_0=int64#1) # asm 2: movq <buf=%rsi,328(<input_0=%rdi) movq % rsi, 328( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 392 ] = buf # asm 1: movq <buf=int64#2,392(<input_0=int64#1) # asm 2: movq <buf=%rsi,392(<input_0=%rdi) movq % rsi, 392( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 456 ] = buf # asm 1: movq <buf=int64#2,456(<input_0=int64#1) # asm 2: movq <buf=%rsi,456(<input_0=%rdi) movq % rsi, 456( % rdi) # qhasm: r0 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 16(<input_0=%rdi),>r0=%xmm6 movddup 16( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 80 ] x2 # asm 1: movddup 80(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 80(<input_0=%rdi),>r1=%xmm7 movddup 80( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 208(<input_0=%rdi),>r3=%xmm9 movddup 208( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 272(<input_0=%rdi),>r4=%xmm10 movddup 272( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 336(<input_0=%rdi),>r5=%xmm11 movddup 336( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 400(<input_0=%rdi),>r6=%xmm12 movddup 400( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 464(<input_0=%rdi),>r7=%xmm13 movddup 464( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor 
<v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 
# qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 
2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 16 ] = buf # 
asm 1: movq <buf=int64#2,16(<input_0=int64#1) # asm 2: movq <buf=%rsi,16(<input_0=%rdi) movq % rsi, 16( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 80 ] = buf # asm 1: movq <buf=int64#2,80(<input_0=int64#1) # asm 2: movq <buf=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 144 ] = buf # asm 1: movq <buf=int64#2,144(<input_0=int64#1) # asm 2: movq <buf=%rsi,144(<input_0=%rdi) movq % rsi, 144( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 208 ] = buf # asm 1: movq <buf=int64#2,208(<input_0=int64#1) # asm 2: movq <buf=%rsi,208(<input_0=%rdi) movq % rsi, 208( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 272 ] = buf # asm 1: movq <buf=int64#2,272(<input_0=int64#1) # asm 2: movq <buf=%rsi,272(<input_0=%rdi) movq % rsi, 272( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 336 ] = buf # asm 1: movq <buf=int64#2,336(<input_0=int64#1) # asm 2: movq <buf=%rsi,336(<input_0=%rdi) movq % rsi, 336( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 400 ] = buf # asm 1: movq <buf=int64#2,400(<input_0=int64#1) # asm 2: movq <buf=%rsi,400(<input_0=%rdi) movq % rsi, 400( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 464 ] = buf # asm 1: movq <buf=int64#2,464(<input_0=int64#1) # asm 2: movq <buf=%rsi,464(<input_0=%rdi) movq % rsi, 464( % rdi) # qhasm: r0 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 24(<input_0=%rdi),>r0=%xmm6 movddup 24( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 88 ] x2 # asm 1: movddup 88(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 88(<input_0=%rdi),>r1=%xmm7 movddup 88( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 152(<input_0=%rdi),>r2=%xmm8 movddup 152( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 280(<input_0=%rdi),>r4=%xmm10 movddup 280( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 344(<input_0=%rdi),>r5=%xmm11 movddup 344( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 408(<input_0=%rdi),>r6=%xmm12 movddup 408( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 472(<input_0=%rdi),>r7=%xmm13 movddup 472( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # 
qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw 
$8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 24 ] = buf # asm 1: movq <buf=int64#2,24(<input_0=int64#1) # asm 2: movq <buf=%rsi,24(<input_0=%rdi) movq % rsi, 24( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 88 ] = buf # asm 1: movq <buf=int64#2,88(<input_0=int64#1) # asm 2: movq <buf=%rsi,88(<input_0=%rdi) movq % rsi, 88( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 152 ] = buf # asm 1: movq <buf=int64#2,152(<input_0=int64#1) # asm 2: movq <buf=%rsi,152(<input_0=%rdi) movq % rsi, 152( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 216 ] = buf # asm 1: movq <buf=int64#2,216(<input_0=int64#1) # asm 2: movq <buf=%rsi,216(<input_0=%rdi) movq % rsi, 216( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 280 ] = buf # asm 1: movq <buf=int64#2,280(<input_0=int64#1) # asm 2: movq <buf=%rsi,280(<input_0=%rdi) movq % rsi, 280( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 344 ] = buf # asm 1: movq <buf=int64#2,344(<input_0=int64#1) # asm 2: movq <buf=%rsi,344(<input_0=%rdi) movq % rsi, 344( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 408 ] = buf # asm 1: movq <buf=int64#2,408(<input_0=int64#1) # asm 2: movq <buf=%rsi,408(<input_0=%rdi) movq % rsi, 408( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 472 ] = buf # asm 1: movq <buf=int64#2,472(<input_0=int64#1) # asm 2: movq <buf=%rsi,472(<input_0=%rdi) movq % rsi, 472( % rdi) # qhasm: r0 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 32(<input_0=%rdi),>r0=%xmm6 movddup 32( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 96 ] x2 # asm 1: movddup 96(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 96(<input_0=%rdi),>r1=%xmm7 movddup 96( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 160(<input_0=%rdi),>r2=%xmm8 movddup 160( % rdi), % xmm8 # qhasm: r3 = 
mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 224(<input_0=%rdi),>r3=%xmm9 movddup 224( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 352(<input_0=%rdi),>r5=%xmm11 movddup 352( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 416(<input_0=%rdi),>r6=%xmm12 movddup 416( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 480(<input_0=%rdi),>r7=%xmm13 movddup 480( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: 
vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 
2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 
unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 32 ] = buf # asm 1: movq <buf=int64#2,32(<input_0=int64#1) # asm 2: movq <buf=%rsi,32(<input_0=%rdi) movq % rsi, 32( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 96 ] = buf # asm 1: movq <buf=int64#2,96(<input_0=int64#1) # asm 2: movq <buf=%rsi,96(<input_0=%rdi) movq % rsi, 96( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 160 ] = buf # asm 1: movq <buf=int64#2,160(<input_0=int64#1) # asm 2: movq <buf=%rsi,160(<input_0=%rdi) movq % rsi, 160( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 224 ] = buf # asm 1: movq <buf=int64#2,224(<input_0=int64#1) # asm 2: movq <buf=%rsi,224(<input_0=%rdi) movq % rsi, 224( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 288 ] = buf # asm 1: movq <buf=int64#2,288(<input_0=int64#1) # asm 2: movq <buf=%rsi,288(<input_0=%rdi) movq % rsi, 288( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 352 ] = buf # asm 1: movq <buf=int64#2,352(<input_0=int64#1) # asm 2: movq <buf=%rsi,352(<input_0=%rdi) movq % rsi, 352( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 416 ] = buf # asm 1: movq <buf=int64#2,416(<input_0=int64#1) # asm 2: movq <buf=%rsi,416(<input_0=%rdi) 
movq % rsi, 416( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 480 ] = buf # asm 1: movq <buf=int64#2,480(<input_0=int64#1) # asm 2: movq <buf=%rsi,480(<input_0=%rdi) movq % rsi, 480( % rdi) # qhasm: r0 = mem64[ input_0 + 40 ] x2 # asm 1: movddup 40(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 40(<input_0=%rdi),>r0=%xmm6 movddup 40( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 104 ] x2 # asm 1: movddup 104(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 104(<input_0=%rdi),>r1=%xmm7 movddup 104( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 168 ] x2 # asm 1: movddup 168(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 168(<input_0=%rdi),>r2=%xmm8 movddup 168( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 232 ] x2 # asm 1: movddup 232(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 232(<input_0=%rdi),>r3=%xmm9 movddup 232( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 296 ] x2 # asm 1: movddup 296(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 296(<input_0=%rdi),>r4=%xmm10 movddup 296( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 360 ] x2 # asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11 movddup 360( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 424 ] x2 # asm 1: movddup 424(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 424(<input_0=%rdi),>r6=%xmm12 movddup 424( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 488 ] x2 # asm 1: movddup 488(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 488(<input_0=%rdi),>r7=%xmm13 movddup 488( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # 
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 40 ] = buf
# asm 1: movq <buf=int64#2,40(<input_0=int64#1)
# asm 2: movq <buf=%rsi,40(<input_0=%rdi)
movq %rsi,40(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi

# qhasm: mem64[ input_0 + 104 ] = buf
# asm 1: movq <buf=int64#2,104(<input_0=int64#1)
# asm 2: movq <buf=%rsi,104(<input_0=%rdi)
movq %rsi,104(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi

# qhasm: mem64[ input_0 + 168 ] = buf
# asm 1: movq <buf=int64#2,168(<input_0=int64#1)
# asm 2: movq <buf=%rsi,168(<input_0=%rdi)
movq %rsi,168(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi

# qhasm: mem64[ input_0 + 232 ] = buf
# asm 1: movq <buf=int64#2,232(<input_0=int64#1)
# asm 2: movq <buf=%rsi,232(<input_0=%rdi)
movq %rsi,232(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi

# qhasm: mem64[ input_0 + 296 ] = buf
# asm 1: movq <buf=int64#2,296(<input_0=int64#1)
# asm 2: movq <buf=%rsi,296(<input_0=%rdi)
movq %rsi,296(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 360 ] = buf
# asm 1: movq <buf=int64#2,360(<input_0=int64#1)
# asm 2: movq <buf=%rsi,360(<input_0=%rdi)
movq %rsi,360(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi

# qhasm: mem64[ input_0 + 424 ] = buf
# asm 1: movq <buf=int64#2,424(<input_0=int64#1)
# asm 2: movq <buf=%rsi,424(<input_0=%rdi)
movq %rsi,424(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi

# qhasm: mem64[ input_0 + 488 ] = buf
# asm 1: movq <buf=int64#2,488(<input_0=int64#1)
# asm 2: movq <buf=%rsi,488(<input_0=%rdi)
movq %rsi,488(%rdi)
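
# The same three butterfly stages (32-, 16-, then 8-bit fields) now repeat on
# the next 64-bit column of the block; only the offsets change, stepping by 8
# bytes within a row and by 64 bytes between rows (48, 112, ..., 496).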
# qhasm: r0 = mem64[ input_0 + 48 ] x2
# asm 1: movddup 48(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 48(<input_0=%rdi),>r0=%xmm6
movddup 48(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 112 ] x2
# asm 1: movddup 112(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 112(<input_0=%rdi),>r1=%xmm7
movddup 112(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 176 ] x2
# asm 1: movddup 176(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 176(<input_0=%rdi),>r2=%xmm8
movddup 176(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 240 ] x2
# asm 1: movddup 240(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 240(<input_0=%rdi),>r3=%xmm9
movddup 240(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 304 ] x2
# asm 1: movddup 304(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 304(<input_0=%rdi),>r4=%xmm10
movddup 304(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 368 ] x2
# asm 1: movddup 368(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 368(<input_0=%rdi),>r5=%xmm11
movddup 368(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 432 ] x2
# asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12
movddup 432(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 496 ] x2
# asm 1: movddup 496(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 496(<input_0=%rdi),>r7=%xmm13
movddup 496(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 48 ] = buf
# asm 1: movq <buf=int64#2,48(<input_0=int64#1)
# asm 2: movq <buf=%rsi,48(<input_0=%rdi)
movq %rsi,48(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi

# qhasm: mem64[ input_0 + 112 ] = buf
# asm 1: movq <buf=int64#2,112(<input_0=int64#1)
# asm 2: movq <buf=%rsi,112(<input_0=%rdi)
movq %rsi,112(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi

# qhasm: mem64[ input_0 + 176 ] = buf
# asm 1: movq <buf=int64#2,176(<input_0=int64#1)
# asm 2: movq <buf=%rsi,176(<input_0=%rdi)
movq %rsi,176(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi

# qhasm: mem64[ input_0 + 240 ] = buf
# asm 1: movq <buf=int64#2,240(<input_0=int64#1)
# asm 2: movq <buf=%rsi,240(<input_0=%rdi)
movq %rsi,240(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi

# qhasm: mem64[ input_0 + 304 ] = buf
# asm 1: movq <buf=int64#2,304(<input_0=int64#1)
# asm 2: movq <buf=%rsi,304(<input_0=%rdi)
movq %rsi,304(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 368 ] = buf
# asm 1: movq <buf=int64#2,368(<input_0=int64#1)
# asm 2: movq <buf=%rsi,368(<input_0=%rdi)
movq %rsi,368(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi

# qhasm: mem64[ input_0 + 432 ] = buf
# asm 1: movq <buf=int64#2,432(<input_0=int64#1)
# asm 2: movq <buf=%rsi,432(<input_0=%rdi)
movq %rsi,432(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi

# qhasm: mem64[ input_0 + 496 ] = buf
# asm 1: movq <buf=int64#2,496(<input_0=int64#1)
# asm 2: movq <buf=%rsi,496(<input_0=%rdi)
movq %rsi,496(%rdi)

# qhasm: r0 = mem64[ input_0 + 56 ] x2
# asm 1: movddup 56(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 56(<input_0=%rdi),>r0=%xmm6
movddup 56(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 120 ] x2
# asm 1: movddup 120(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 120(<input_0=%rdi),>r1=%xmm7
movddup 120(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 184 ] x2
# asm 1: movddup 184(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 184(<input_0=%rdi),>r2=%xmm8
movddup 184(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 248 ] x2
# asm 1: movddup 248(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 248(<input_0=%rdi),>r3=%xmm9
movddup 248(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 312 ] x2
# asm 1: movddup 312(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 312(<input_0=%rdi),>r4=%xmm10
movddup 312(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 376 ] x2
# asm 1: movddup 376(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 376(<input_0=%rdi),>r5=%xmm11
movddup 376(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 440 ] x2
# asm 1: movddup 440(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 440(<input_0=%rdi),>r6=%xmm12
movddup 440(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 504 ] x2
# asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13
movddup 504(%rdi),%xmm13
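
# Last column of this block (offsets 56, 120, ..., 504). From here the
# generated code also starts reusing the mask registers xmm0-xmm5 as
# scratch, which is safe only because all six masks are reloaded right
# after the stores below.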
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#1
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm0
vpand %xmm0,%xmm9,%xmm0

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#13
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm12
vpsllq $32,%xmm13,%xmm12

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1
vpand %xmm1,%xmm13,%xmm1

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#1,>r3=reg128#1
# asm 2: vpor <v10=%xmm12,<v00=%xmm0,>r3=%xmm0
vpor %xmm12,%xmm0,%xmm0

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1
vpor %xmm1,%xmm9,%xmm1

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9
vpand %xmm2,%xmm14,%xmm9

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#13
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm12
vpslld $16,%xmm11,%xmm12

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#14
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm13
vpsrld $16,%xmm14,%xmm13

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9
vpor %xmm12,%xmm9,%xmm9

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11
vpor %xmm11,%xmm13,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12
vpand %xmm2,%xmm10,%xmm12

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#1,>v10=reg128#14
# asm 2: vpslld $16,<r3=%xmm0,>v10=%xmm13
vpslld $16,%xmm0,%xmm13

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0
vpand %xmm3,%xmm0,%xmm0

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13
# asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12
vpor %xmm13,%xmm12,%xmm12

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm10,>r3=%xmm0
vpor %xmm0,%xmm10,%xmm0

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10
vpand %xmm2,%xmm6,%xmm10

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#14
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm13
vpslld $16,%xmm8,%xmm13

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11
# asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10
vpor %xmm13,%xmm10,%xmm10

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#3
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm2
vpand %xmm2,%xmm7,%xmm2

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#2,>v10=reg128#9
# asm 2: vpslld $16,<r7=%xmm1,>v10=%xmm8
vpslld $16,%xmm1,%xmm8

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1
vpand %xmm3,%xmm1,%xmm1

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#9,<v00=reg128#3,>r5=reg128#3
# asm 2: vpor <v10=%xmm8,<v00=%xmm2,>r5=%xmm2
vpor %xmm8,%xmm2,%xmm2

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1
vpor %xmm1,%xmm7,%xmm1

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4
# asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3
vpand %xmm4,%xmm9,%xmm3

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#13,>v10=reg128#8
# asm 2: vpsllw $8,<r1=%xmm12,>v10=%xmm7
vpsllw $8,%xmm12,%xmm7

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#10,>v01=reg128#9
# asm 2: vpsrlw $8,<r0=%xmm9,>v01=%xmm8
vpsrlw $8,%xmm9,%xmm8

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10
# asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9
vpand %xmm5,%xmm12,%xmm9

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4
# asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3
vpor %xmm7,%xmm3,%xmm3

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7
vpor %xmm9,%xmm8,%xmm7

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8
vpand %xmm4,%xmm11,%xmm8

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#1,>v10=reg128#10
# asm 2: vpsllw $8,<r3=%xmm0,>v10=%xmm9
vpsllw $8,%xmm0,%xmm9

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0
vpand %xmm5,%xmm0,%xmm0

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9
# asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8
vpor %xmm9,%xmm8,%xmm8

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0
vpor %xmm0,%xmm11,%xmm0

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9
vpand %xmm4,%xmm10,%xmm9

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#3,>v10=reg128#12
# asm 2: vpsllw $8,<r5=%xmm2,>v10=%xmm11
vpsllw $8,%xmm2,%xmm11

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#11,>v01=reg128#11
# asm 2: vpsrlw $8,<r4=%xmm10,>v01=%xmm10
vpsrlw $8,%xmm10,%xmm10

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3
# asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2
vpand %xmm5,%xmm2,%xmm2

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10
# asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9
vpor %xmm11,%xmm9,%xmm9

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3
# asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2
vpor %xmm2,%xmm10,%xmm2

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#5
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm4
vpand %xmm4,%xmm6,%xmm4

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#2,>v10=reg128#11
# asm 2: vpsllw $8,<r7=%xmm1,>v10=%xmm10
vpsllw $8,%xmm1,%xmm10

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1
vpand %xmm5,%xmm1,%xmm1

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#11,<v00=reg128#5,>r6=reg128#5
# asm 2: vpor <v10=%xmm10,<v00=%xmm4,>r6=%xmm4
vpor %xmm10,%xmm4,%xmm4

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1
vpor %xmm1,%xmm6,%xmm1

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#4,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm3,>buf=%rsi
pextrq $0x0,%xmm3,%rsi

# qhasm: mem64[ input_0 + 56 ] = buf
# asm 1: movq <buf=int64#2,56(<input_0=int64#1)
# asm 2: movq <buf=%rsi,56(<input_0=%rdi)
movq %rsi,56(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#8,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm7,>buf=%rsi
pextrq $0x0,%xmm7,%rsi

# qhasm: mem64[ input_0 + 120 ] = buf
# asm 1: movq <buf=int64#2,120(<input_0=int64#1)
# asm 2: movq <buf=%rsi,120(<input_0=%rdi)
movq %rsi,120(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 184 ] = buf
# asm 1: movq <buf=int64#2,184(<input_0=int64#1)
# asm 2: movq <buf=%rsi,184(<input_0=%rdi)
movq %rsi,184(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#1,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm0,>buf=%rsi
pextrq $0x0,%xmm0,%rsi

# qhasm: mem64[ input_0 + 248 ] = buf
# asm 1: movq <buf=int64#2,248(<input_0=int64#1)
# asm 2: movq <buf=%rsi,248(<input_0=%rdi)
movq %rsi,248(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 312 ] = buf
# asm 1: movq <buf=int64#2,312(<input_0=int64#1)
# asm 2: movq <buf=%rsi,312(<input_0=%rdi)
movq %rsi,312(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#3,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm2,>buf=%rsi
pextrq $0x0,%xmm2,%rsi

# qhasm: mem64[ input_0 + 376 ] = buf
# asm 1: movq <buf=int64#2,376(<input_0=int64#1)
# asm 2: movq <buf=%rsi,376(<input_0=%rdi)
movq %rsi,376(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#5,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm4,>buf=%rsi
pextrq $0x0,%xmm4,%rsi

# qhasm: mem64[ input_0 + 440 ] = buf
# asm 1: movq <buf=int64#2,440(<input_0=int64#1)
# asm 2: movq <buf=%rsi,440(<input_0=%rdi)
movq %rsi,440(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#2,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm1,>buf=%rsi
pextrq $0x0,%xmm1,%rsi

# qhasm: mem64[ input_0 + 504 ] = buf
# asm 1: movq <buf=int64#2,504(<input_0=int64#1)
# asm 2: movq <buf=%rsi,504(<input_0=%rdi)
movq %rsi,504(%rdi)

# qhasm: mask0 aligned= mem128[ MASK2_0 ]
# asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0
movdqa MASK2_0(%rip),%xmm0

# qhasm: mask1 aligned= mem128[ MASK2_1 ]
# asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1
movdqa MASK2_1(%rip),%xmm1

# qhasm: mask2 aligned= mem128[ MASK1_0 ]
# asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2
movdqa MASK1_0(%rip),%xmm2

# qhasm: mask3 aligned= mem128[ MASK1_1 ]
# asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3
movdqa MASK1_1(%rip),%xmm3

# qhasm: mask4 aligned= mem128[ MASK0_0 ]
# asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4
movdqa MASK0_0(%rip),%xmm4

# qhasm: mask5 aligned= mem128[ MASK0_1 ]
# asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5
movdqa MASK0_1(%rip),%xmm5
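
# Second pass: the mask set has just been re-pointed at the finer-grained
# constants from consts.S -- mask0/mask1 = MASK2_* (nibble masks),
# mask2/mask3 = MASK1_* (bit-pair masks), mask4/mask5 = MASK0_* (single-bit
# masks) -- so the stages below continue the same swap network at 4-, 2- and
# 1-bit granularity inside each byte.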
# qhasm: r0 = mem64[ input_0 + 0 ] x2
# asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6
movddup 0(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 8 ] x2
# asm 1: movddup 8(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 8(<input_0=%rdi),>r1=%xmm7
movddup 8(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 16 ] x2
# asm 1: movddup 16(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 16(<input_0=%rdi),>r2=%xmm8
movddup 16(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 24 ] x2
# asm 1: movddup 24(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 24(<input_0=%rdi),>r3=%xmm9
movddup 24(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 32 ] x2
# asm 1: movddup 32(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 32(<input_0=%rdi),>r4=%xmm10
movddup 32(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 40 ] x2
# asm 1: movddup 40(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 40(<input_0=%rdi),>r5=%xmm11
movddup 40(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 48 ] x2
# asm 1: movddup 48(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 48(<input_0=%rdi),>r6=%xmm12
movddup 48(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 56 ] x2
# asm 1: movddup 56(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 56(<input_0=%rdi),>r7=%xmm13
movddup 56(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7

# qhasm: mem128[ input_0 + 0 ] = t0
# asm 1: movdqu <t0=reg128#8,0(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,0(<input_0=%rdi)
movdqu %xmm7,0(%rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7

# qhasm: mem128[ input_0 + 16 ] = t0
# asm 1: movdqu <t0=reg128#8,16(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,16(<input_0=%rdi)
movdqu %xmm7,16(%rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7

# qhasm: mem128[ input_0 + 32 ] = t0
# asm 1: movdqu <t0=reg128#8,32(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,32(<input_0=%rdi)
movdqu %xmm7,32(%rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6

# qhasm: mem128[ input_0 + 48 ] = t0
# asm 1: movdqu <t0=reg128#7,48(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,48(<input_0=%rdi)
movdqu %xmm6,48(%rdi)
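
# After the 4/2/1-bit stages, each register holds one finished 64-bit row,
# duplicated in both lanes (the inputs came from movddup and the masks are
# lane-identical); vpunpcklqdq above packs the low lanes of row pairs so two
# rows land per 128-bit store (offsets 0, 16, 32, 48). The same intra-byte
# pass now runs on the next eight rows (offsets 64..120).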
# qhasm: r0 = mem64[ input_0 + 64 ] x2
# asm 1: movddup 64(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 64(<input_0=%rdi),>r0=%xmm6
movddup 64(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 72 ] x2
# asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7
movddup 72(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 80 ] x2
# asm 1: movddup 80(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 80(<input_0=%rdi),>r2=%xmm8
movddup 80(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 88 ] x2
# asm 1: movddup 88(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 88(<input_0=%rdi),>r3=%xmm9
movddup 88(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 96 ] x2
# asm 1: movddup 96(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 96(<input_0=%rdi),>r4=%xmm10
movddup 96(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 104 ] x2
# asm 1: movddup 104(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 104(<input_0=%rdi),>r5=%xmm11
movddup 104(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 112 ] x2
# asm 1: movddup 112(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 112(<input_0=%rdi),>r6=%xmm12
movddup 112(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 120 ] x2
# asm 1: movddup 120(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 120(<input_0=%rdi),>r7=%xmm13
movddup 120(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq
$1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 
1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 64 ] = t0 # asm 1: movdqu <t0=reg128#8,64(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,64(<input_0=%rdi) movdqu % xmm7, 64( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 80 ] = t0 # asm 1: movdqu <t0=reg128#8,80(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,80(<input_0=%rdi) movdqu % xmm7, 80( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 96 ] = t0 # asm 1: movdqu <t0=reg128#8,96(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,96(<input_0=%rdi) movdqu % xmm7, 96( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 112 ] = t0 # asm 1: movdqu <t0=reg128#7,112(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,112(<input_0=%rdi) movdqu % xmm6, 112( % rdi) # qhasm: r0 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 128(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 128(<input_0=%rdi),>r0=%xmm6 movddup 128( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 136(<input_0=%rdi),>r1=%xmm7 movddup 136( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 152(<input_0=%rdi),>r3=%xmm9 movddup 152( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 160(<input_0=%rdi),>r4=%xmm10 movddup 160( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 168 ] x2 # asm 1: movddup 168(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 168(<input_0=%rdi),>r5=%xmm11 movddup 168( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 176 ] x2 # asm 1: movddup 176(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 176(<input_0=%rdi),>r6=%xmm12 movddup 176( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 184 ] x2 # asm 1: movddup 184(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 184(<input_0=%rdi),>r7=%xmm13 movddup 184( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & 
mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor 
<v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: 
vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor 
<v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 128 ] = t0 # asm 1: movdqu <t0=reg128#8,128(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,128(<input_0=%rdi) movdqu % xmm7, 128( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 144 ] = t0 # asm 1: movdqu <t0=reg128#8,144(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,144(<input_0=%rdi) movdqu % xmm7, 144( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 160 ] = t0 # asm 1: movdqu <t0=reg128#8,160(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,160(<input_0=%rdi) movdqu % xmm7, 160( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 176 ] = t0 # asm 1: movdqu <t0=reg128#7,176(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,176(<input_0=%rdi) movdqu % xmm6, 176( % rdi) # qhasm: r0 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 192(<input_0=%rdi),>r0=%xmm6 movddup 192( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 200(<input_0=%rdi),>r1=%xmm7 movddup 200( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 208(<input_0=%rdi),>r2=%xmm8 movddup 208( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 224(<input_0=%rdi),>r4=%xmm10 movddup 224( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 232 ] x2 # asm 1: movddup 232(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 232(<input_0=%rdi),>r5=%xmm11 movddup 232( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 240 ] x2 # asm 1: movddup 240(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 240(<input_0=%rdi),>r6=%xmm12 movddup 240( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 248 ] x2 # asm 1: movddup 248(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 248(<input_0=%rdi),>r7=%xmm13 movddup 248( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % 
xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand 
<mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & 
mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 192 ] = t0 # asm 1: movdqu <t0=reg128#8,192(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,192(<input_0=%rdi) movdqu % xmm7, 192( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 208 ] = t0 # asm 1: movdqu 
<t0=reg128#8,208(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,208(<input_0=%rdi) movdqu % xmm7, 208( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 224 ] = t0 # asm 1: movdqu <t0=reg128#8,224(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,224(<input_0=%rdi) movdqu % xmm7, 224( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 240 ] = t0 # asm 1: movdqu <t0=reg128#7,240(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,240(<input_0=%rdi) movdqu % xmm6, 240( % rdi) # qhasm: r0 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 256(<input_0=%rdi),>r0=%xmm6 movddup 256( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 264(<input_0=%rdi),>r1=%xmm7 movddup 264( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 272(<input_0=%rdi),>r2=%xmm8 movddup 272( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 280(<input_0=%rdi),>r3=%xmm9 movddup 280( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 296 ] x2 # asm 1: movddup 296(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 296(<input_0=%rdi),>r5=%xmm11 movddup 296( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 304 ] x2 # asm 1: movddup 304(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 304(<input_0=%rdi),>r6=%xmm12 movddup 304( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 312 ] x2 # asm 1: movddup 312(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 312(<input_0=%rdi),>r7=%xmm13 movddup 312( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 
v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand 
<mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand % xmm2, % xmm9, % xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, % xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand % xmm3, % xmm7, % xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, % xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand % xmm4, % xmm14, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand % xmm5, % xmm13, % xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, % xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand % xmm4, % xmm10, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand % xmm5, % xmm11, % xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, % xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand % xmm4, % xmm8, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand % xmm5, % xmm12, % xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, % xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand % xmm4, % xmm7, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand % xmm5, % xmm6, % xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, % xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq % xmm13, % xmm9, % xmm7

# qhasm: mem128[ input_0 + 256 ] = t0
# asm 1: movdqu <t0=reg128#8,256(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,256(<input_0=%rdi)
movdqu % xmm7, 256( % rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq % xmm10, % xmm14, % xmm7

# qhasm: mem128[ input_0 + 272 ] = t0
# asm 1: movdqu <t0=reg128#8,272(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,272(<input_0=%rdi)
movdqu % xmm7, 272( % rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq % xmm8, % xmm11, % xmm7

# qhasm: mem128[ input_0 + 288 ] = t0
# asm 1: movdqu <t0=reg128#8,288(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,288(<input_0=%rdi)
movdqu % xmm7, 288( % rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq % xmm6, % xmm12, % xmm6

# qhasm: mem128[ input_0 + 304 ] = t0
# asm 1: movdqu <t0=reg128#7,304(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,304(<input_0=%rdi) movdqu % xmm6, 304( % rdi) # qhasm: r0 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 320(<input_0=%rdi),>r0=%xmm6 movddup 320( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 328(<input_0=%rdi),>r1=%xmm7 movddup 328( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 336(<input_0=%rdi),>r2=%xmm8 movddup 336( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 344(<input_0=%rdi),>r3=%xmm9 movddup 344( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 352(<input_0=%rdi),>r4=%xmm10 movddup 352( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 360 ] x2 # asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11 movddup 360( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 368 ] x2 # asm 1: movddup 368(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 368(<input_0=%rdi),>r6=%xmm12 movddup 368( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 376 ] x2 # asm 1: movddup 376(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 376(<input_0=%rdi),>r7=%xmm13 movddup 376( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 
psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # 
asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 320 ] = t0 # asm 1: movdqu <t0=reg128#8,320(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,320(<input_0=%rdi) movdqu % xmm7, 320( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 336 ] = t0 # asm 1: movdqu <t0=reg128#8,336(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,336(<input_0=%rdi) movdqu % xmm7, 336( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 352 ] = t0 # asm 1: movdqu <t0=reg128#8,352(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,352(<input_0=%rdi) movdqu % xmm7, 352( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 368 ] = t0 # asm 1: movdqu <t0=reg128#7,368(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,368(<input_0=%rdi) movdqu % xmm6, 368( % rdi) # qhasm: r0 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 384(<input_0=%rdi),>r0=%xmm6 movddup 384( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 392(<input_0=%rdi),>r1=%xmm7 movddup 392( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 400(<input_0=%rdi),>r2=%xmm8 movddup 400( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 
408(<input_0=%rdi),>r3=%xmm9 movddup 408( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 416(<input_0=%rdi),>r4=%xmm10 movddup 416( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 424 ] x2 # asm 1: movddup 424(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 424(<input_0=%rdi),>r5=%xmm11 movddup 424( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 432 ] x2 # asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12 movddup 432( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 440 ] x2 # asm 1: movddup 440(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 440(<input_0=%rdi),>r7=%xmm13 movddup 440( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # 
qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # 
asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq 
$1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % 
xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 384 ] = t0 # asm 1: movdqu <t0=reg128#8,384(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,384(<input_0=%rdi) movdqu % xmm7, 384( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 400 ] = t0 # asm 1: movdqu <t0=reg128#8,400(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,400(<input_0=%rdi) movdqu % xmm7, 400( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 416 ] = t0 # asm 1: movdqu <t0=reg128#8,416(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,416(<input_0=%rdi) movdqu % xmm7, 416( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 432 ] = t0 # asm 1: movdqu <t0=reg128#7,432(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,432(<input_0=%rdi) movdqu % xmm6, 432( % rdi) # qhasm: r0 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 448(<input_0=%rdi),>r0=%xmm6 movddup 448( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 456(<input_0=%rdi),>r1=%xmm7 movddup 456( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 464(<input_0=%rdi),>r2=%xmm8 movddup 464( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 472(<input_0=%rdi),>r3=%xmm9 movddup 472( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 480(<input_0=%rdi),>r4=%xmm10 movddup 480( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 488 ] x2 # asm 1: movddup 488(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 488(<input_0=%rdi),>r5=%xmm11 movddup 488( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 496 ] x2 # asm 1: movddup 496(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 496(<input_0=%rdi),>r6=%xmm12 movddup 496( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 504 ] x2 # asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13 movddup 504( % rdi), % xmm13 # 
qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % 
xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>r3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>r3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor 
<v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<r7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>r5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>r5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<r1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<r0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % 
xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<r3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<r5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<r4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<r7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>r6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>r6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % 
xmm1 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#8,<r0=reg128#4,>t0=reg128#4 # asm 2: vpunpcklqdq <r1=%xmm7,<r0=%xmm3,>t0=%xmm3 vpunpcklqdq % xmm7, % xmm3, % xmm3 # qhasm: mem128[ input_0 + 448 ] = t0 # asm 1: movdqu <t0=reg128#4,448(<input_0=int64#1) # asm 2: movdqu <t0=%xmm3,448(<input_0=%rdi) movdqu % xmm3, 448( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#1,<r2=reg128#9,>t0=reg128#1 # asm 2: vpunpcklqdq <r3=%xmm0,<r2=%xmm8,>t0=%xmm0 vpunpcklqdq % xmm0, % xmm8, % xmm0 # qhasm: mem128[ input_0 + 464 ] = t0 # asm 1: movdqu <t0=reg128#1,464(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,464(<input_0=%rdi) movdqu % xmm0, 464( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#3,<r4=reg128#10,>t0=reg128#1 # asm 2: vpunpcklqdq <r5=%xmm2,<r4=%xmm9,>t0=%xmm0 vpunpcklqdq % xmm2, % xmm9, % xmm0 # qhasm: mem128[ input_0 + 480 ] = t0 # asm 1: movdqu <t0=reg128#1,480(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,480(<input_0=%rdi) movdqu % xmm0, 480( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#2,<r6=reg128#5,>t0=reg128#1 # asm 2: vpunpcklqdq <r7=%xmm1,<r6=%xmm4,>t0=%xmm0 vpunpcklqdq % xmm1, % xmm4, % xmm0 # qhasm: mem128[ input_0 + 496 ] = t0 # asm 1: movdqu <t0=reg128#1,496(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,496(<input_0=%rdi) movdqu % xmm0, 496( % rdi) # qhasm: return add % r11, % rsp ret
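The long vpand/psllq/psrlq/vpor ladders that close this routine are qhasm-expanded butterfly ("delta swap") stages: each group exchanges mask-selected bit fields between two rows at a fixed distance, and the three distances (4, then 2, then 1 -- paired, to judge from the consts.S tables, with the 0x0F0F.../0xF0F0..., 0x3333.../0xCCCC... and 0x5555.../0xAAAA... mask pairs) together transpose the 8x8-bit blocks spanning r0..r7 before the vpunpcklqdq stores write the results back. The scalar C sketch below shows the same pattern under those assumptions; delta_swap and transpose_8x8_bit_blocks are illustrative names rather than PQClean functions, and the SIMD duplication (movddup) and the stores are left out.

#include <stdint.h>

/* One masked butterfly ("delta swap") between two 64-bit rows at bit
   distance d: the scalar form of each vpand/psllq/psrlq/vpor group in
   the assembly above.  Illustrative helper, not the PQClean API. */
void delta_swap(uint64_t *lo, uint64_t *hi,
                uint64_t mask_lo, uint64_t mask_hi, unsigned d)
{
    uint64_t v00 = *lo & mask_lo;        /* bits of lo that stay put  */
    uint64_t v10 = (*hi & mask_lo) << d; /* bits of hi moving into lo */
    uint64_t v01 = (*lo & mask_hi) >> d; /* bits of lo moving into hi */
    uint64_t v11 = *hi & mask_hi;        /* bits of hi that stay put  */
    *lo = v00 | v10;
    *hi = v01 | v11;
}

/* The three ladders in the same pairing order as the assembly:
   distance 4 pairs (r0,r4)..(r3,r7); distance 2 pairs (r0,r2),(r1,r3),
   (r4,r6),(r5,r7); distance 1 pairs adjacent rows.  Together these
   transpose every 8x8 bit block held across the eight rows. */
void transpose_8x8_bit_blocks(uint64_t r[8])
{
    static const int p2[4][2] = { {0, 2}, {1, 3}, {4, 6}, {5, 7} };
    for (int i = 0; i < 4; i++)
        delta_swap(&r[i], &r[i + 4],
                   0x0F0F0F0F0F0F0F0FULL, 0xF0F0F0F0F0F0F0F0ULL, 4);
    for (int i = 0; i < 4; i++)
        delta_swap(&r[p2[i][0]], &r[p2[i][1]],
                   0x3333333333333333ULL, 0xCCCCCCCCCCCCCCCCULL, 2);
    for (int i = 0; i < 8; i += 2)
        delta_swap(&r[i], &r[i + 1],
                   0x5555555555555555ULL, 0xAAAAAAAAAAAAAAAAULL, 1);
}

Since an 8x8 bit transpose is an involution, applying transpose_8x8_bit_blocks twice must return the original rows -- a convenient self-check for the sketch.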
mktmansour/MKT-KSA-Geolocation-Security
2,712
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128/avx2/consts.S
#include "namespace.h" #if defined(__APPLE__) #define ASM_HIDDEN .private_extern #else #define ASM_HIDDEN .hidden #endif #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) .data ASM_HIDDEN MASK0_0 ASM_HIDDEN MASK0_1 ASM_HIDDEN MASK1_0 ASM_HIDDEN MASK1_1 ASM_HIDDEN MASK2_0 ASM_HIDDEN MASK2_1 ASM_HIDDEN MASK3_0 ASM_HIDDEN MASK3_1 ASM_HIDDEN MASK4_0 ASM_HIDDEN MASK4_1 ASM_HIDDEN MASK5_0 ASM_HIDDEN MASK5_1 .globl MASK0_0 .globl MASK0_1 .globl MASK1_0 .globl MASK1_1 .globl MASK2_0 .globl MASK2_1 .globl MASK3_0 .globl MASK3_1 .globl MASK4_0 .globl MASK4_1 .globl MASK5_0 .globl MASK5_1 .p2align 5 MASK0_0: .quad 0x5555555555555555, 0x5555555555555555, 0x5555555555555555, 0x5555555555555555 MASK0_1: .quad 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA MASK1_0: .quad 0x3333333333333333, 0x3333333333333333, 0x3333333333333333, 0x3333333333333333 MASK1_1: .quad 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC MASK2_0: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F MASK2_1: .quad 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0 MASK3_0: .quad 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF MASK3_1: .quad 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00 MASK4_0: .quad 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF MASK4_1: .quad 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000 MASK5_0: .quad 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF MASK5_1: .quad 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000
mktmansour/MKT-KSA-Geolocation-Security
14,915
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128/avx2/update_asm.S
#include "namespace.h" #define update_asm CRYPTO_NAMESPACE(update_asm) #define _update_asm _CRYPTO_NAMESPACE(update_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 s0 # qhasm: int64 s1 # qhasm: int64 s2 # qhasm: enter update_asm .p2align 5 .global _update_asm .global update_asm _update_asm: update_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: s2 = input_1 # asm 1: mov <input_1=int64#2,>s2=int64#2 # asm 2: mov <input_1=%rsi,>s2=%rsi mov % rsi, % rsi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ 
input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 
0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd 
$1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq 
<s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: return add % r11, % rsp ret
mktmansour/MKT-KSA-Geolocation-Security
53,565
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128/avx2/vec128_mul_asm.S
#include "namespace.h" #define vec128_mul_asm CRYPTO_NAMESPACE(vec128_mul_asm) #define _vec128_mul_asm _CRYPTO_NAMESPACE(vec128_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 b6 # qhasm: reg256 b7 # qhasm: reg256 b8 # qhasm: reg256 b9 # qhasm: reg256 b10 # qhasm: reg256 b11 # qhasm: reg256 b12 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: reg128 h0 # qhasm: reg128 h1 # qhasm: reg128 h2 # qhasm: reg128 h3 # qhasm: reg128 h4 # qhasm: reg128 h5 # qhasm: reg128 h6 # qhasm: reg128 h7 # qhasm: reg128 h8 # qhasm: reg128 h9 # qhasm: reg128 h10 # qhasm: reg128 h11 # qhasm: reg128 h12 # qhasm: reg128 h13 # qhasm: reg128 h14 # qhasm: reg128 h15 # qhasm: reg128 h16 # qhasm: reg128 h17 # qhasm: reg128 h18 # qhasm: reg128 h19 # qhasm: reg128 h20 # qhasm: reg128 h21 # qhasm: reg128 h22 # qhasm: reg128 h23 # qhasm: reg128 h24 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: enter vec128_mul_asm .p2align 5 .global _vec128_mul_asm .global vec128_mul_asm _vec128_mul_asm: vec128_mul_asm: mov % rsp, % r11 and $31, % r11 add $608, % r11 sub % r11, % rsp # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#5 # asm 2: leaq <buf=0(%rsp),>ptr=%r8 leaq 0( % rsp), % r8 # qhasm: tmp = input_3 # asm 1: mov <input_3=int64#4,>tmp=int64#6 # asm 2: mov <input_3=%rcx,>tmp=%r9 mov % rcx, % r9 # qhasm: tmp *= 12 # asm 1: imulq $12,<tmp=int64#6,>tmp=int64#6 # asm 2: imulq $12,<tmp=%r9,>tmp=%r9 imulq $12, % r9, % r9 # qhasm: input_2 += tmp # asm 1: add <tmp=int64#6,<input_2=int64#3 # asm 2: add <tmp=%r9,<input_2=%rdx add % r9, % rdx # qhasm: b12 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b12=reg256#1 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b12=%ymm0 vbroadcasti128 0( % rdx), % ymm0 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: a6 = a6 ^ a6 # asm 1: vpxor <a6=reg256#2,<a6=reg256#2,>a6=reg256#2 # asm 2: vpxor <a6=%ymm1,<a6=%ymm1,>a6=%ymm1 vpxor % ymm1, % ymm1, % ymm1 # qhasm: a6[0] = mem128[ input_1 + 96 ] # asm 1: vinsertf128 $0x0,96(<input_1=int64#2),<a6=reg256#2,<a6=reg256#2 # asm 2: vinsertf128 $0x0,96(<input_1=%rsi),<a6=%ymm1,<a6=%ymm1 vinsertf128 $0x0, 96( % rsi), % ymm1, % ymm1 # qhasm: r18 = b12 & a6 # asm 1: vpand <b12=reg256#1,<a6=reg256#2,>r18=reg256#3 # asm 2: vpand <b12=%ymm0,<a6=%ymm1,>r18=%ymm2 vpand % ymm0, % ymm1, % ymm2 # qhasm: mem256[ ptr + 576 ] = r18 # asm 1: vmovupd <r18=reg256#3,576(<ptr=int64#5) # asm 2: 
vmovupd <r18=%ymm2,576(<ptr=%r8) vmovupd % ymm2, 576( % r8) # qhasm: a5[0] = mem128[ input_1 + 80 ] # asm 1: vinsertf128 $0x0,80(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x0,80(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x0, 80( % rsi), % ymm2, % ymm2 # qhasm: a5[1] = mem128[ input_1 + 192 ] # asm 1: vinsertf128 $0x1,192(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x1,192(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x1, 192( % rsi), % ymm2, % ymm2 # qhasm: r17 = b12 & a5 # asm 1: vpand <b12=reg256#1,<a5=reg256#3,>r17=reg256#4 # asm 2: vpand <b12=%ymm0,<a5=%ymm2,>r17=%ymm3 vpand % ymm0, % ymm2, % ymm3 # qhasm: a4[0] = mem128[ input_1 + 64 ] # asm 1: vinsertf128 $0x0,64(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x0,64(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x0, 64( % rsi), % ymm4, % ymm4 # qhasm: a4[1] = mem128[ input_1 + 176 ] # asm 1: vinsertf128 $0x1,176(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x1,176(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x1, 176( % rsi), % ymm4, % ymm4 # qhasm: r16 = b12 & a4 # asm 1: vpand <b12=reg256#1,<a4=reg256#5,>r16=reg256#6 # asm 2: vpand <b12=%ymm0,<a4=%ymm4,>r16=%ymm5 vpand % ymm0, % ymm4, % ymm5 # qhasm: a3[0] = mem128[ input_1 + 48 ] # asm 1: vinsertf128 $0x0,48(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x0,48(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x0, 48( % rsi), % ymm6, % ymm6 # qhasm: a3[1] = mem128[ input_1 + 160 ] # asm 1: vinsertf128 $0x1,160(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x1,160(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x1, 160( % rsi), % ymm6, % ymm6 # qhasm: r15 = b12 & a3 # asm 1: vpand <b12=reg256#1,<a3=reg256#7,>r15=reg256#8 # asm 2: vpand <b12=%ymm0,<a3=%ymm6,>r15=%ymm7 vpand % ymm0, % ymm6, % ymm7 # qhasm: a2[0] = mem128[ input_1 + 32 ] # asm 1: vinsertf128 $0x0,32(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x0,32(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x0, 32( % rsi), % ymm8, % ymm8 # qhasm: a2[1] = mem128[ input_1 + 144 ] # asm 1: vinsertf128 $0x1,144(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x1,144(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x1, 144( % rsi), % ymm8, % ymm8 # qhasm: r14 = b12 & a2 # asm 1: vpand <b12=reg256#1,<a2=reg256#9,>r14=reg256#10 # asm 2: vpand <b12=%ymm0,<a2=%ymm8,>r14=%ymm9 vpand % ymm0, % ymm8, % ymm9 # qhasm: a1[0] = mem128[ input_1 + 16 ] # asm 1: vinsertf128 $0x0,16(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x0,16(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x0, 16( % rsi), % ymm10, % ymm10 # qhasm: a1[1] = mem128[ input_1 + 128 ] # asm 1: vinsertf128 $0x1,128(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x1,128(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x1, 128( % rsi), % ymm10, % ymm10 # qhasm: r13 = b12 & a1 # asm 1: vpand <b12=reg256#1,<a1=reg256#11,>r13=reg256#12 # asm 2: vpand <b12=%ymm0,<a1=%ymm10,>r13=%ymm11 vpand % ymm0, % ymm10, % ymm11 # qhasm: a0[0] = mem128[ input_1 + 0 ] # asm 1: vinsertf128 $0x0,0(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x0,0(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x0, 0( % rsi), % ymm12, % ymm12 # qhasm: a0[1] = mem128[ input_1 + 112 ] # asm 1: vinsertf128 $0x1,112(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x1,112(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x1, 112( % rsi), % ymm12, % ymm12 
# qhasm: r12 = b12 & a0 # asm 1: vpand <b12=reg256#1,<a0=reg256#13,>r12=reg256#1 # asm 2: vpand <b12=%ymm0,<a0=%ymm12,>r12=%ymm0 vpand % ymm0, % ymm12, % ymm0 # qhasm: b11 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b11=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b11=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b11 & a6 # asm 1: vpand <b11=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b11=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#4,<r17=reg256#4 # asm 2: vpxor <r=%ymm14,<r17=%ymm3,<r17=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 544 ] = r17 # asm 1: vmovupd <r17=reg256#4,544(<ptr=int64#5) # asm 2: vmovupd <r17=%ymm3,544(<ptr=%r8) vmovupd % ymm3, 544( % r8) # qhasm: r = b11 & a5 # asm 1: vpand <b11=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#4,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm3,<r16=%ymm5,<r16=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b11 & a4 # asm 1: vpand <b11=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#4,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm3,<r15=%ymm7,<r15=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b11 & a3 # asm 1: vpand <b11=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#4,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm3,<r14=%ymm9,<r14=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b11 & a2 # asm 1: vpand <b11=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#4,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm3,<r13=%ymm11,<r13=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b11 & a1 # asm 1: vpand <b11=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#4,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm3,<r12=%ymm0,<r12=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r11 = b11 & a0 # asm 1: vpand <b11=reg256#14,<a0=reg256#13,>r11=reg256#4 # asm 2: vpand <b11=%ymm13,<a0=%ymm12,>r11=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b10 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b10=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b10=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b10 & a6 # asm 1: vpand <b10=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b10=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm14,<r16=%ymm5,<r16=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 512 ] = r16 # asm 1: vmovupd <r16=reg256#6,512(<ptr=int64#5) # asm 2: vmovupd <r16=%ymm5,512(<ptr=%r8) vmovupd % ymm5, 512( % r8) # qhasm: r = b10 & a5 # asm 1: vpand <b10=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # 
qhasm: r15 ^= r # asm 1: vpxor <r=reg256#6,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm5,<r15=%ymm7,<r15=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b10 & a4 # asm 1: vpand <b10=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#6,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm5,<r14=%ymm9,<r14=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b10 & a3 # asm 1: vpand <b10=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#6,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm5,<r13=%ymm11,<r13=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b10 & a2 # asm 1: vpand <b10=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#6,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm5,<r12=%ymm0,<r12=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b10 & a1 # asm 1: vpand <b10=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#6,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm5,<r11=%ymm3,<r11=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r10 = b10 & a0 # asm 1: vpand <b10=reg256#14,<a0=reg256#13,>r10=reg256#6 # asm 2: vpand <b10=%ymm13,<a0=%ymm12,>r10=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b9 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b9=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b9=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b9 & a6 # asm 1: vpand <b9=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b9=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm14,<r15=%ymm7,<r15=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 480 ] = r15 # asm 1: vmovupd <r15=reg256#8,480(<ptr=int64#5) # asm 2: vmovupd <r15=%ymm7,480(<ptr=%r8) vmovupd % ymm7, 480( % r8) # qhasm: r = b9 & a5 # asm 1: vpand <b9=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#8,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm7,<r14=%ymm9,<r14=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b9 & a4 # asm 1: vpand <b9=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#8,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm7,<r13=%ymm11,<r13=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b9 & a3 # asm 1: vpand <b9=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#8,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm7,<r12=%ymm0,<r12=%ymm0 vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b9 & a2 # asm 1: vpand <b9=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#8,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm7,<r11=%ymm3,<r11=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b9 & a1 # asm 1: vpand 
<b9=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#8,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm7,<r10=%ymm5,<r10=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r9 = b9 & a0 # asm 1: vpand <b9=reg256#14,<a0=reg256#13,>r9=reg256#8 # asm 2: vpand <b9=%ymm13,<a0=%ymm12,>r9=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b8 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b8=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b8=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b8 & a6 # asm 1: vpand <b8=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b8=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm14,<r14=%ymm9,<r14=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 448 ] = r14 # asm 1: vmovupd <r14=reg256#10,448(<ptr=int64#5) # asm 2: vmovupd <r14=%ymm9,448(<ptr=%r8) vmovupd % ymm9, 448( % r8) # qhasm: r = b8 & a5 # asm 1: vpand <b8=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#10,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm9,<r13=%ymm11,<r13=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b8 & a4 # asm 1: vpand <b8=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#10,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm9,<r12=%ymm0,<r12=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b8 & a3 # asm 1: vpand <b8=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#10,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm9,<r11=%ymm3,<r11=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b8 & a2 # asm 1: vpand <b8=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#10,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm9,<r10=%ymm5,<r10=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b8 & a1 # asm 1: vpand <b8=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#10,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm9,<r9=%ymm7,<r9=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r8 = b8 & a0 # asm 1: vpand <b8=reg256#14,<a0=reg256#13,>r8=reg256#10 # asm 2: vpand <b8=%ymm13,<a0=%ymm12,>r8=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b7 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b7=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b7=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b7 & a6 # asm 1: vpand <b7=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b7=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm14,<r13=%ymm11,<r13=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 416 ] = r13 # asm 1: vmovupd 
<r13=reg256#12,416(<ptr=int64#5) # asm 2: vmovupd <r13=%ymm11,416(<ptr=%r8) vmovupd % ymm11, 416( % r8) # qhasm: r = b7 & a5 # asm 1: vpand <b7=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#12,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm11,<r12=%ymm0,<r12=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b7 & a4 # asm 1: vpand <b7=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#12,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm11,<r11=%ymm3,<r11=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b7 & a3 # asm 1: vpand <b7=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#12,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm11,<r10=%ymm5,<r10=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b7 & a2 # asm 1: vpand <b7=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#12,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm11,<r9=%ymm7,<r9=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b7 & a1 # asm 1: vpand <b7=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#12,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm11,<r8=%ymm9,<r8=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r7 = b7 & a0 # asm 1: vpand <b7=reg256#14,<a0=reg256#13,>r7=reg256#12 # asm 2: vpand <b7=%ymm13,<a0=%ymm12,>r7=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b6 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b6=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b6=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b6 & a6 # asm 1: vpand <b6=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b6=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm14,<r12=%ymm0,<r12=%ymm0 vpxor % ymm14, % ymm0, % ymm0 # qhasm: mem256[ ptr + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<ptr=int64#5) # asm 2: vmovupd <r12=%ymm0,384(<ptr=%r8) vmovupd % ymm0, 384( % r8) # qhasm: r = b6 & a5 # asm 1: vpand <b6=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm0,<r11=%ymm3,<r11=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b6 & a4 # asm 1: vpand <b6=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm0,<r10=%ymm5,<r10=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b6 & a3 # asm 1: vpand <b6=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a3=%ymm6,>r=%ymm0 vpand % ymm13, % ymm6, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm0,<r9=%ymm7,<r9=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b6 & a2 # asm 1: vpand <b6=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand 
<b6=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm0,<r8=%ymm9,<r8=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b6 & a1 # asm 1: vpand <b6=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm0,<r7=%ymm11,<r7=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r6 = b6 & a0 # asm 1: vpand <b6=reg256#14,<a0=reg256#13,>r6=reg256#1 # asm 2: vpand <b6=%ymm13,<a0=%ymm12,>r6=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: b5 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b5=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b5=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b5 & a6 # asm 1: vpand <b5=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b5=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm14,<r11=%ymm3,<r11=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 352 ] = r11 # asm 1: vmovupd <r11=reg256#4,352(<ptr=int64#5) # asm 2: vmovupd <r11=%ymm3,352(<ptr=%r8) vmovupd % ymm3, 352( % r8) # qhasm: r = b5 & a5 # asm 1: vpand <b5=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#4,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm3,<r10=%ymm5,<r10=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b5 & a4 # asm 1: vpand <b5=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#4,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm3,<r9=%ymm7,<r9=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b5 & a3 # asm 1: vpand <b5=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#4,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm3,<r8=%ymm9,<r8=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b5 & a2 # asm 1: vpand <b5=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#4,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm3,<r7=%ymm11,<r7=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b5 & a1 # asm 1: vpand <b5=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#4,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm3,<r6=%ymm0,<r6=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r5 = b5 & a0 # asm 1: vpand <b5=reg256#14,<a0=reg256#13,>r5=reg256#4 # asm 2: vpand <b5=%ymm13,<a0=%ymm12,>r5=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b4 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b4=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b4=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b4 & a6 # asm 1: vpand <b4=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b4=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: 
r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm14,<r10=%ymm5,<r10=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#6,320(<ptr=int64#5) # asm 2: vmovupd <r10=%ymm5,320(<ptr=%r8) vmovupd % ymm5, 320( % r8) # qhasm: r = b4 & a5 # asm 1: vpand <b4=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#6,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm5,<r9=%ymm7,<r9=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b4 & a4 # asm 1: vpand <b4=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#6,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm5,<r8=%ymm9,<r8=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b4 & a3 # asm 1: vpand <b4=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#6,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm5,<r7=%ymm11,<r7=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b4 & a2 # asm 1: vpand <b4=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#6,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm5,<r6=%ymm0,<r6=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b4 & a1 # asm 1: vpand <b4=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#6,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm5,<r5=%ymm3,<r5=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r4 = b4 & a0 # asm 1: vpand <b4=reg256#14,<a0=reg256#13,>r4=reg256#6 # asm 2: vpand <b4=%ymm13,<a0=%ymm12,>r4=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b3 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b3=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b3=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b3 & a6 # asm 1: vpand <b3=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b3=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm14,<r9=%ymm7,<r9=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#8,288(<ptr=int64#5) # asm 2: vmovupd <r9=%ymm7,288(<ptr=%r8) vmovupd % ymm7, 288( % r8) # qhasm: r = b3 & a5 # asm 1: vpand <b3=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#8,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm7,<r8=%ymm9,<r8=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b3 & a4 # asm 1: vpand <b3=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#8,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm7,<r7=%ymm11,<r7=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b3 & a3 # asm 1: vpand <b3=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#8,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm7,<r6=%ymm0,<r6=%ymm0 
vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b3 & a2 # asm 1: vpand <b3=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#8,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm7,<r5=%ymm3,<r5=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b3 & a1 # asm 1: vpand <b3=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#8,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm7,<r4=%ymm5,<r4=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r3 = b3 & a0 # asm 1: vpand <b3=reg256#14,<a0=reg256#13,>r3=reg256#8 # asm 2: vpand <b3=%ymm13,<a0=%ymm12,>r3=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b2 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b2=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b2=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b2 & a6 # asm 1: vpand <b2=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b2=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm14,<r8=%ymm9,<r8=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#10,256(<ptr=int64#5) # asm 2: vmovupd <r8=%ymm9,256(<ptr=%r8) vmovupd % ymm9, 256( % r8) # qhasm: r = b2 & a5 # asm 1: vpand <b2=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#10,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm9,<r7=%ymm11,<r7=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b2 & a4 # asm 1: vpand <b2=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#10,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm9,<r6=%ymm0,<r6=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b2 & a3 # asm 1: vpand <b2=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#10,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm9,<r5=%ymm3,<r5=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b2 & a2 # asm 1: vpand <b2=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#10,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm9,<r4=%ymm5,<r4=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b2 & a1 # asm 1: vpand <b2=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#10,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm9,<r3=%ymm7,<r3=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r2 = b2 & a0 # asm 1: vpand <b2=reg256#14,<a0=reg256#13,>r2=reg256#10 # asm 2: vpand <b2=%ymm13,<a0=%ymm12,>r2=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b1 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b1=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b1=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b1 & a6 # asm 1: vpand 
<b1=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b1=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm14,<r7=%ymm11,<r7=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#12,224(<ptr=int64#5) # asm 2: vmovupd <r7=%ymm11,224(<ptr=%r8) vmovupd % ymm11, 224( % r8) # qhasm: r = b1 & a5 # asm 1: vpand <b1=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#12,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm11,<r6=%ymm0,<r6=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b1 & a4 # asm 1: vpand <b1=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#12,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm11,<r5=%ymm3,<r5=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b1 & a3 # asm 1: vpand <b1=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#12,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm11,<r4=%ymm5,<r4=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b1 & a2 # asm 1: vpand <b1=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#12,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm11,<r3=%ymm7,<r3=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b1 & a1 # asm 1: vpand <b1=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#12,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm11,<r2=%ymm9,<r2=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r1 = b1 & a0 # asm 1: vpand <b1=reg256#14,<a0=reg256#13,>r1=reg256#12 # asm 2: vpand <b1=%ymm13,<a0=%ymm12,>r1=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b0 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b0=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b0=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b0 & a6 # asm 1: vpand <b0=reg256#14,<a6=reg256#2,>r=reg256#2 # asm 2: vpand <b0=%ymm13,<a6=%ymm1,>r=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#2,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm1,<r6=%ymm0,<r6=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<ptr=int64#5) # asm 2: vmovupd <r6=%ymm0,192(<ptr=%r8) vmovupd % ymm0, 192( % r8) # qhasm: r = b0 & a5 # asm 1: vpand <b0=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm0,<r5=%ymm3,<r5=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b0 & a4 # asm 1: vpand <b0=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm0,<r4=%ymm5,<r4=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b0 & a3 # asm 1: vpand <b0=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a3=%ymm6,>r=%ymm0 vpand 
% ymm13, % ymm6, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm0,<r3=%ymm7,<r3=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b0 & a2 # asm 1: vpand <b0=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm0,<r2=%ymm9,<r2=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b0 & a1 # asm 1: vpand <b0=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#12,<r1=reg256#12 # asm 2: vpxor <r=%ymm0,<r1=%ymm11,<r1=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r0 = b0 & a0 # asm 1: vpand <b0=reg256#14,<a0=reg256#13,>r0=reg256#1 # asm 2: vpand <b0=%ymm13,<a0=%ymm12,>r0=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#5) # asm 2: vmovupd <r5=%ymm3,160(<ptr=%r8) vmovupd % ymm3, 160( % r8) # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#6,128(<ptr=int64#5) # asm 2: vmovupd <r4=%ymm5,128(<ptr=%r8) vmovupd % ymm5, 128( % r8) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#8,96(<ptr=int64#5) # asm 2: vmovupd <r3=%ymm7,96(<ptr=%r8) vmovupd % ymm7, 96( % r8) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#10,64(<ptr=int64#5) # asm 2: vmovupd <r2=%ymm9,64(<ptr=%r8) vmovupd % ymm9, 64( % r8) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#12,32(<ptr=int64#5) # asm 2: vmovupd <r1=%ymm11,32(<ptr=%r8) vmovupd % ymm11, 32( % r8) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#5) # asm 2: vmovupd <r0=%ymm0,0(<ptr=%r8) vmovupd % ymm0, 0( % r8) # qhasm: vzeroupper vzeroupper # qhasm: h24 = mem128[ ptr + 560 ] # asm 1: movdqu 560(<ptr=int64#5),>h24=reg128#1 # asm 2: movdqu 560(<ptr=%r8),>h24=%xmm0 movdqu 560( % r8), % xmm0 # qhasm: h11 = h24 # asm 1: movdqa <h24=reg128#1,>h11=reg128#2 # asm 2: movdqa <h24=%xmm0,>h11=%xmm1 movdqa % xmm0, % xmm1 # qhasm: h12 = h24 # asm 1: movdqa <h24=reg128#1,>h12=reg128#3 # asm 2: movdqa <h24=%xmm0,>h12=%xmm2 movdqa % xmm0, % xmm2 # qhasm: h14 = h24 # asm 1: movdqa <h24=reg128#1,>h14=reg128#4 # asm 2: movdqa <h24=%xmm0,>h14=%xmm3 movdqa % xmm0, % xmm3 # qhasm: h15 = h24 # asm 1: movdqa <h24=reg128#1,>h15=reg128#1 # asm 2: movdqa <h24=%xmm0,>h15=%xmm0 movdqa % xmm0, % xmm0 # qhasm: h23 = mem128[ ptr + 528 ] # asm 1: movdqu 528(<ptr=int64#5),>h23=reg128#5 # asm 2: movdqu 528(<ptr=%r8),>h23=%xmm4 movdqu 528( % r8), % xmm4 # qhasm: h10 = h23 # asm 1: movdqa <h23=reg128#5,>h10=reg128#6 # asm 2: movdqa <h23=%xmm4,>h10=%xmm5 movdqa % xmm4, % xmm5 # qhasm: h11 = h11 ^ h23 # asm 1: vpxor <h23=reg128#5,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h23=%xmm4,<h11=%xmm1,>h11=%xmm1 vpxor % xmm4, % xmm1, % xmm1 # qhasm: h13 = h23 # asm 1: movdqa <h23=reg128#5,>h13=reg128#7 # asm 2: movdqa <h23=%xmm4,>h13=%xmm6 movdqa % xmm4, % xmm6 # qhasm: h14 = h14 ^ h23 # asm 1: vpxor <h23=reg128#5,<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor <h23=%xmm4,<h14=%xmm3,>h14=%xmm3 vpxor % xmm4, % xmm3, % xmm3 # qhasm: h22 = mem128[ ptr + 496 ] # asm 1: movdqu 496(<ptr=int64#5),>h22=reg128#5 # asm 2: movdqu 496(<ptr=%r8),>h22=%xmm4 movdqu 496( % r8), % xmm4 # qhasm: h9 = h22 # asm 1: movdqa <h22=reg128#5,>h9=reg128#8 # asm 2: movdqa <h22=%xmm4,>h9=%xmm7 movdqa % xmm4, % xmm7 # qhasm: h10 = h10 ^ h22 # asm 1: vpxor <h22=reg128#5,<h10=reg128#6,>h10=reg128#6 # asm 2: 
vpxor <h22=%xmm4,<h10=%xmm5,>h10=%xmm5 vpxor % xmm4, % xmm5, % xmm5 # qhasm: h12 = h12 ^ h22 # asm 1: vpxor <h22=reg128#5,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h22=%xmm4,<h12=%xmm2,>h12=%xmm2 vpxor % xmm4, % xmm2, % xmm2 # qhasm: h13 = h13 ^ h22 # asm 1: vpxor <h22=reg128#5,<h13=reg128#7,>h13=reg128#5 # asm 2: vpxor <h22=%xmm4,<h13=%xmm6,>h13=%xmm4 vpxor % xmm4, % xmm6, % xmm4 # qhasm: h21 = mem128[ ptr + 464 ] # asm 1: movdqu 464(<ptr=int64#5),>h21=reg128#7 # asm 2: movdqu 464(<ptr=%r8),>h21=%xmm6 movdqu 464( % r8), % xmm6 # qhasm: h8 = h21 # asm 1: movdqa <h21=reg128#7,>h8=reg128#9 # asm 2: movdqa <h21=%xmm6,>h8=%xmm8 movdqa % xmm6, % xmm8 # qhasm: h9 = h9 ^ h21 # asm 1: vpxor <h21=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h21=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h11 = h11 ^ h21 # asm 1: vpxor <h21=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h21=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h12 = h12 ^ h21 # asm 1: vpxor <h21=reg128#7,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h21=%xmm6,<h12=%xmm2,>h12=%xmm2 vpxor % xmm6, % xmm2, % xmm2 # qhasm: h20 = mem128[ ptr + 432 ] # asm 1: movdqu 432(<ptr=int64#5),>h20=reg128#7 # asm 2: movdqu 432(<ptr=%r8),>h20=%xmm6 movdqu 432( % r8), % xmm6 # qhasm: h7 = h20 # asm 1: movdqa <h20=reg128#7,>h7=reg128#10 # asm 2: movdqa <h20=%xmm6,>h7=%xmm9 movdqa % xmm6, % xmm9 # qhasm: h8 = h8 ^ h20 # asm 1: vpxor <h20=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h20=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h10 = h10 ^ h20 # asm 1: vpxor <h20=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h20=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h11 = h11 ^ h20 # asm 1: vpxor <h20=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h20=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h19 = mem128[ ptr + 400 ] # asm 1: movdqu 400(<ptr=int64#5),>h19=reg128#7 # asm 2: movdqu 400(<ptr=%r8),>h19=%xmm6 movdqu 400( % r8), % xmm6 # qhasm: h6 = h19 # asm 1: movdqa <h19=reg128#7,>h6=reg128#11 # asm 2: movdqa <h19=%xmm6,>h6=%xmm10 movdqa % xmm6, % xmm10 # qhasm: h7 = h7 ^ h19 # asm 1: vpxor <h19=reg128#7,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h19=%xmm6,<h7=%xmm9,>h7=%xmm9 vpxor % xmm6, % xmm9, % xmm9 # qhasm: h9 = h9 ^ h19 # asm 1: vpxor <h19=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h19=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h10 = h10 ^ h19 # asm 1: vpxor <h19=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h19=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h18 = mem128[ ptr + 368 ] # asm 1: movdqu 368(<ptr=int64#5),>h18=reg128#7 # asm 2: movdqu 368(<ptr=%r8),>h18=%xmm6 movdqu 368( % r8), % xmm6 # qhasm: h18 = h18 ^ mem128[ ptr + 576 ] # asm 1: vpxor 576(<ptr=int64#5),<h18=reg128#7,>h18=reg128#7 # asm 2: vpxor 576(<ptr=%r8),<h18=%xmm6,>h18=%xmm6 vpxor 576( % r8), % xmm6, % xmm6 # qhasm: h5 = h18 # asm 1: movdqa <h18=reg128#7,>h5=reg128#12 # asm 2: movdqa <h18=%xmm6,>h5=%xmm11 movdqa % xmm6, % xmm11 # qhasm: h6 = h6 ^ h18 # asm 1: vpxor <h18=reg128#7,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h18=%xmm6,<h6=%xmm10,>h6=%xmm10 vpxor % xmm6, % xmm10, % xmm10 # qhasm: h8 = h8 ^ h18 # asm 1: vpxor <h18=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h18=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h9 = h9 ^ h18 # asm 1: vpxor <h18=reg128#7,<h9=reg128#8,>h9=reg128#7 # asm 2: vpxor <h18=%xmm6,<h9=%xmm7,>h9=%xmm6 vpxor % xmm6, % xmm7, % xmm6 # qhasm: h17 = 
mem128[ ptr + 336 ] # asm 1: movdqu 336(<ptr=int64#5),>h17=reg128#8 # asm 2: movdqu 336(<ptr=%r8),>h17=%xmm7 movdqu 336( % r8), % xmm7 # qhasm: h17 = h17 ^ mem128[ ptr + 544 ] # asm 1: vpxor 544(<ptr=int64#5),<h17=reg128#8,>h17=reg128#8 # asm 2: vpxor 544(<ptr=%r8),<h17=%xmm7,>h17=%xmm7 vpxor 544( % r8), % xmm7, % xmm7 # qhasm: h4 = h17 # asm 1: movdqa <h17=reg128#8,>h4=reg128#13 # asm 2: movdqa <h17=%xmm7,>h4=%xmm12 movdqa % xmm7, % xmm12 # qhasm: h5 = h5 ^ h17 # asm 1: vpxor <h17=reg128#8,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h17=%xmm7,<h5=%xmm11,>h5=%xmm11 vpxor % xmm7, % xmm11, % xmm11 # qhasm: h7 = h7 ^ h17 # asm 1: vpxor <h17=reg128#8,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h17=%xmm7,<h7=%xmm9,>h7=%xmm9 vpxor % xmm7, % xmm9, % xmm9 # qhasm: h8 = h8 ^ h17 # asm 1: vpxor <h17=reg128#8,<h8=reg128#9,>h8=reg128#8 # asm 2: vpxor <h17=%xmm7,<h8=%xmm8,>h8=%xmm7 vpxor % xmm7, % xmm8, % xmm7 # qhasm: h16 = mem128[ ptr + 304 ] # asm 1: movdqu 304(<ptr=int64#5),>h16=reg128#9 # asm 2: movdqu 304(<ptr=%r8),>h16=%xmm8 movdqu 304( % r8), % xmm8 # qhasm: h16 = h16 ^ mem128[ ptr + 512 ] # asm 1: vpxor 512(<ptr=int64#5),<h16=reg128#9,>h16=reg128#9 # asm 2: vpxor 512(<ptr=%r8),<h16=%xmm8,>h16=%xmm8 vpxor 512( % r8), % xmm8, % xmm8 # qhasm: h3 = h16 # asm 1: movdqa <h16=reg128#9,>h3=reg128#14 # asm 2: movdqa <h16=%xmm8,>h3=%xmm13 movdqa % xmm8, % xmm13 # qhasm: h4 = h4 ^ h16 # asm 1: vpxor <h16=reg128#9,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor <h16=%xmm8,<h4=%xmm12,>h4=%xmm12 vpxor % xmm8, % xmm12, % xmm12 # qhasm: h6 = h6 ^ h16 # asm 1: vpxor <h16=reg128#9,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h16=%xmm8,<h6=%xmm10,>h6=%xmm10 vpxor % xmm8, % xmm10, % xmm10 # qhasm: h7 = h7 ^ h16 # asm 1: vpxor <h16=reg128#9,<h7=reg128#10,>h7=reg128#9 # asm 2: vpxor <h16=%xmm8,<h7=%xmm9,>h7=%xmm8 vpxor % xmm8, % xmm9, % xmm8 # qhasm: h15 = h15 ^ mem128[ ptr + 272 ] # asm 1: vpxor 272(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 272(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 272( % r8), % xmm0, % xmm0 # qhasm: h15 = h15 ^ mem128[ ptr + 480 ] # asm 1: vpxor 480(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 480(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 480( % r8), % xmm0, % xmm0 # qhasm: h2 = h15 # asm 1: movdqa <h15=reg128#1,>h2=reg128#10 # asm 2: movdqa <h15=%xmm0,>h2=%xmm9 movdqa % xmm0, % xmm9 # qhasm: h3 = h3 ^ h15 # asm 1: vpxor <h15=reg128#1,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h15=%xmm0,<h3=%xmm13,>h3=%xmm13 vpxor % xmm0, % xmm13, % xmm13 # qhasm: h5 = h5 ^ h15 # asm 1: vpxor <h15=reg128#1,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h15=%xmm0,<h5=%xmm11,>h5=%xmm11 vpxor % xmm0, % xmm11, % xmm11 # qhasm: h6 = h6 ^ h15 # asm 1: vpxor <h15=reg128#1,<h6=reg128#11,>h6=reg128#1 # asm 2: vpxor <h15=%xmm0,<h6=%xmm10,>h6=%xmm0 vpxor % xmm0, % xmm10, % xmm0 # qhasm: h14 = h14 ^ mem128[ ptr + 240 ] # asm 1: vpxor 240(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 240(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 240( % r8), % xmm3, % xmm3 # qhasm: h14 = h14 ^ mem128[ ptr + 448 ] # asm 1: vpxor 448(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 448(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 448( % r8), % xmm3, % xmm3 # qhasm: h1 = h14 # asm 1: movdqa <h14=reg128#4,>h1=reg128#11 # asm 2: movdqa <h14=%xmm3,>h1=%xmm10 movdqa % xmm3, % xmm10 # qhasm: h2 = h2 ^ h14 # asm 1: vpxor <h14=reg128#4,<h2=reg128#10,>h2=reg128#10 # asm 2: vpxor <h14=%xmm3,<h2=%xmm9,>h2=%xmm9 vpxor % xmm3, % xmm9, % xmm9 # qhasm: h4 = h4 ^ h14 # asm 1: vpxor <h14=reg128#4,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor 
<h14=%xmm3,<h4=%xmm12,>h4=%xmm12 vpxor % xmm3, % xmm12, % xmm12 # qhasm: h5 = h5 ^ h14 # asm 1: vpxor <h14=reg128#4,<h5=reg128#12,>h5=reg128#4 # asm 2: vpxor <h14=%xmm3,<h5=%xmm11,>h5=%xmm3 vpxor % xmm3, % xmm11, % xmm3 # qhasm: h13 = h13 ^ mem128[ ptr + 208 ] # asm 1: vpxor 208(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 208(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 208( % r8), % xmm4, % xmm4 # qhasm: h13 = h13 ^ mem128[ ptr + 416 ] # asm 1: vpxor 416(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 416(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 416( % r8), % xmm4, % xmm4 # qhasm: h0 = h13 # asm 1: movdqa <h13=reg128#5,>h0=reg128#12 # asm 2: movdqa <h13=%xmm4,>h0=%xmm11 movdqa % xmm4, % xmm11 # qhasm: h1 = h1 ^ h13 # asm 1: vpxor <h13=reg128#5,<h1=reg128#11,>h1=reg128#11 # asm 2: vpxor <h13=%xmm4,<h1=%xmm10,>h1=%xmm10 vpxor % xmm4, % xmm10, % xmm10 # qhasm: h3 = h3 ^ h13 # asm 1: vpxor <h13=reg128#5,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h13=%xmm4,<h3=%xmm13,>h3=%xmm13 vpxor % xmm4, % xmm13, % xmm13 # qhasm: h4 = h4 ^ h13 # asm 1: vpxor <h13=reg128#5,<h4=reg128#13,>h4=reg128#5 # asm 2: vpxor <h13=%xmm4,<h4=%xmm12,>h4=%xmm4 vpxor % xmm4, % xmm12, % xmm4 # qhasm: h12 = h12 ^ mem128[ ptr + 384 ] # asm 1: vpxor 384(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 384(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 384( % r8), % xmm2, % xmm2 # qhasm: h12 = h12 ^ mem128[ ptr + 176 ] # asm 1: vpxor 176(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 176(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 176( % r8), % xmm2, % xmm2 # qhasm: mem128[ input_0 + 192 ] = h12 # asm 1: movdqu <h12=reg128#3,192(<input_0=int64#1) # asm 2: movdqu <h12=%xmm2,192(<input_0=%rdi) movdqu % xmm2, 192( % rdi) # qhasm: h11 = h11 ^ mem128[ ptr + 352 ] # asm 1: vpxor 352(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 352(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 352( % r8), % xmm1, % xmm1 # qhasm: h11 = h11 ^ mem128[ ptr + 144 ] # asm 1: vpxor 144(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 144(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 144( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 176 ] = h11 # asm 1: movdqu <h11=reg128#2,176(<input_0=int64#1) # asm 2: movdqu <h11=%xmm1,176(<input_0=%rdi) movdqu % xmm1, 176( % rdi) # qhasm: h10 = h10 ^ mem128[ ptr + 320 ] # asm 1: vpxor 320(<ptr=int64#5),<h10=reg128#6,>h10=reg128#2 # asm 2: vpxor 320(<ptr=%r8),<h10=%xmm5,>h10=%xmm1 vpxor 320( % r8), % xmm5, % xmm1 # qhasm: h10 = h10 ^ mem128[ ptr + 112 ] # asm 1: vpxor 112(<ptr=int64#5),<h10=reg128#2,>h10=reg128#2 # asm 2: vpxor 112(<ptr=%r8),<h10=%xmm1,>h10=%xmm1 vpxor 112( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 160 ] = h10 # asm 1: movdqu <h10=reg128#2,160(<input_0=int64#1) # asm 2: movdqu <h10=%xmm1,160(<input_0=%rdi) movdqu % xmm1, 160( % rdi) # qhasm: h9 = h9 ^ mem128[ ptr + 288 ] # asm 1: vpxor 288(<ptr=int64#5),<h9=reg128#7,>h9=reg128#2 # asm 2: vpxor 288(<ptr=%r8),<h9=%xmm6,>h9=%xmm1 vpxor 288( % r8), % xmm6, % xmm1 # qhasm: h9 = h9 ^ mem128[ ptr + 80 ] # asm 1: vpxor 80(<ptr=int64#5),<h9=reg128#2,>h9=reg128#2 # asm 2: vpxor 80(<ptr=%r8),<h9=%xmm1,>h9=%xmm1 vpxor 80( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 144 ] = h9 # asm 1: movdqu <h9=reg128#2,144(<input_0=int64#1) # asm 2: movdqu <h9=%xmm1,144(<input_0=%rdi) movdqu % xmm1, 144( % rdi) # qhasm: h8 = h8 ^ mem128[ ptr + 256 ] # asm 1: vpxor 256(<ptr=int64#5),<h8=reg128#8,>h8=reg128#2 # asm 2: vpxor 256(<ptr=%r8),<h8=%xmm7,>h8=%xmm1 vpxor 256( % r8), % xmm7, % xmm1 # qhasm: h8 = h8 ^ mem128[ ptr + 48 ] # asm 1: vpxor 
48(<ptr=int64#5),<h8=reg128#2,>h8=reg128#2 # asm 2: vpxor 48(<ptr=%r8),<h8=%xmm1,>h8=%xmm1 vpxor 48( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 128 ] = h8 # asm 1: movdqu <h8=reg128#2,128(<input_0=int64#1) # asm 2: movdqu <h8=%xmm1,128(<input_0=%rdi) movdqu % xmm1, 128( % rdi) # qhasm: h7 = h7 ^ mem128[ ptr + 224 ] # asm 1: vpxor 224(<ptr=int64#5),<h7=reg128#9,>h7=reg128#2 # asm 2: vpxor 224(<ptr=%r8),<h7=%xmm8,>h7=%xmm1 vpxor 224( % r8), % xmm8, % xmm1 # qhasm: h7 = h7 ^ mem128[ ptr + 16 ] # asm 1: vpxor 16(<ptr=int64#5),<h7=reg128#2,>h7=reg128#2 # asm 2: vpxor 16(<ptr=%r8),<h7=%xmm1,>h7=%xmm1 vpxor 16( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 112 ] = h7 # asm 1: movdqu <h7=reg128#2,112(<input_0=int64#1) # asm 2: movdqu <h7=%xmm1,112(<input_0=%rdi) movdqu % xmm1, 112( % rdi) # qhasm: h6 = h6 ^ mem128[ ptr + 192 ] # asm 1: vpxor 192(<ptr=int64#5),<h6=reg128#1,>h6=reg128#1 # asm 2: vpxor 192(<ptr=%r8),<h6=%xmm0,>h6=%xmm0 vpxor 192( % r8), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 96 ] = h6 # asm 1: movdqu <h6=reg128#1,96(<input_0=int64#1) # asm 2: movdqu <h6=%xmm0,96(<input_0=%rdi) movdqu % xmm0, 96( % rdi) # qhasm: h5 = h5 ^ mem128[ ptr + 160 ] # asm 1: vpxor 160(<ptr=int64#5),<h5=reg128#4,>h5=reg128#1 # asm 2: vpxor 160(<ptr=%r8),<h5=%xmm3,>h5=%xmm0 vpxor 160( % r8), % xmm3, % xmm0 # qhasm: mem128[ input_0 + 80 ] = h5 # asm 1: movdqu <h5=reg128#1,80(<input_0=int64#1) # asm 2: movdqu <h5=%xmm0,80(<input_0=%rdi) movdqu % xmm0, 80( % rdi) # qhasm: h4 = h4 ^ mem128[ ptr + 128 ] # asm 1: vpxor 128(<ptr=int64#5),<h4=reg128#5,>h4=reg128#1 # asm 2: vpxor 128(<ptr=%r8),<h4=%xmm4,>h4=%xmm0 vpxor 128( % r8), % xmm4, % xmm0 # qhasm: mem128[ input_0 + 64 ] = h4 # asm 1: movdqu <h4=reg128#1,64(<input_0=int64#1) # asm 2: movdqu <h4=%xmm0,64(<input_0=%rdi) movdqu % xmm0, 64( % rdi) # qhasm: h3 = h3 ^ mem128[ ptr + 96 ] # asm 1: vpxor 96(<ptr=int64#5),<h3=reg128#14,>h3=reg128#1 # asm 2: vpxor 96(<ptr=%r8),<h3=%xmm13,>h3=%xmm0 vpxor 96( % r8), % xmm13, % xmm0 # qhasm: mem128[ input_0 + 48 ] = h3 # asm 1: movdqu <h3=reg128#1,48(<input_0=int64#1) # asm 2: movdqu <h3=%xmm0,48(<input_0=%rdi) movdqu % xmm0, 48( % rdi) # qhasm: h2 = h2 ^ mem128[ ptr + 64 ] # asm 1: vpxor 64(<ptr=int64#5),<h2=reg128#10,>h2=reg128#1 # asm 2: vpxor 64(<ptr=%r8),<h2=%xmm9,>h2=%xmm0 vpxor 64( % r8), % xmm9, % xmm0 # qhasm: mem128[ input_0 + 32 ] = h2 # asm 1: movdqu <h2=reg128#1,32(<input_0=int64#1) # asm 2: movdqu <h2=%xmm0,32(<input_0=%rdi) movdqu % xmm0, 32( % rdi) # qhasm: h1 = h1 ^ mem128[ ptr + 32 ] # asm 1: vpxor 32(<ptr=int64#5),<h1=reg128#11,>h1=reg128#1 # asm 2: vpxor 32(<ptr=%r8),<h1=%xmm10,>h1=%xmm0 vpxor 32( % r8), % xmm10, % xmm0 # qhasm: mem128[ input_0 + 16 ] = h1 # asm 1: movdqu <h1=reg128#1,16(<input_0=int64#1) # asm 2: movdqu <h1=%xmm0,16(<input_0=%rdi) movdqu % xmm0, 16( % rdi) # qhasm: h0 = h0 ^ mem128[ ptr + 0 ] # asm 1: vpxor 0(<ptr=int64#5),<h0=reg128#12,>h0=reg128#1 # asm 2: vpxor 0(<ptr=%r8),<h0=%xmm11,>h0=%xmm0 vpxor 0( % r8), % xmm11, % xmm0 # qhasm: mem128[ input_0 + 0 ] = h0 # asm 1: movdqu <h0=reg128#1,0(<input_0=int64#1) # asm 2: movdqu <h0=%xmm0,0(<input_0=%rdi) movdqu % xmm0, 0( % rdi) # qhasm: return add % r11, % rsp ret
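This is the tail of a bitsliced GF(2^13) multiplication: the AND/XOR block accumulates the carryless schoolbook product of two 13-plane inputs into a scratch buffer, and the movdqu/vpxor sequence then reduces the degree-24 result h[0..24] back to 13 planes, folding each h[k] with k >= 13 into h[k-13], h[k-12], h[k-10] and h[k-9], which matches x^13 = x^4 + x^3 + x + 1, the GF(2^13) field polynomial. A minimal C sketch of the same computation, with one 64-bit lane standing in for the 128-bit planes the assembly uses; the name vec_mul_ref is illustrative:

#include <stdint.h>

typedef uint64_t vec; /* the asm works on 128-bit planes; one 64-bit lane shown */

/* Bitsliced GF(2^13) multiply: in a[0..12] and b[0..12], plane i holds
   bit i of a batch of field elements. */
static void vec_mul_ref(vec *out, const vec *a, const vec *b)
{
    vec h[25] = {0};

    for (int i = 0; i < 13; i++)        /* carryless schoolbook product */
        for (int j = 0; j < 13; j++)
            h[i + j] ^= a[i] & b[j];

    for (int k = 24; k >= 13; k--) {    /* fold h[k] via x^13 = x^4+x^3+x+1 */
        h[k - 13] ^= h[k];
        h[k - 12] ^= h[k];
        h[k - 10] ^= h[k];
        h[k - 9]  ^= h[k];
    }

    for (int i = 0; i < 13; i++)
        out[i] = h[i];
}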
mktmansour/MKT-KSA-Geolocation-Security
11,545
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128/avx2/vec_reduce_asm.S
#include "namespace.h" #define vec_reduce_asm CRYPTO_NAMESPACE(vec_reduce_asm) #define _vec_reduce_asm _CRYPTO_NAMESPACE(vec_reduce_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 t0 # qhasm: int64 t1 # qhasm: int64 c # qhasm: int64 r # qhasm: enter vec_reduce_asm .p2align 5 .global _vec_reduce_asm .global vec_reduce_asm _vec_reduce_asm: vec_reduce_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: r = 0 # asm 1: mov $0,>r=int64#7 # asm 2: mov $0,>r=%rax mov $0, % rax # qhasm: t0 = mem64[ input_0 + 192 ] # asm 1: movq 192(<input_0=int64#1),>t0=int64#2 # asm 2: movq 192(<input_0=%rdi),>t0=%rsi movq 192( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 200 ] # asm 1: movq 200(<input_0=int64#1),>t1=int64#3 # asm 2: movq 200(<input_0=%rdi),>t1=%rdx movq 200( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 176 ] # asm 1: movq 176(<input_0=int64#1),>t0=int64#2 # asm 2: movq 176(<input_0=%rdi),>t0=%rsi movq 176( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 184 ] # asm 1: movq 184(<input_0=int64#1),>t1=int64#3 # asm 2: movq 184(<input_0=%rdi),>t1=%rdx movq 184( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 160 ] # asm 1: movq 160(<input_0=int64#1),>t0=int64#2 # asm 2: movq 160(<input_0=%rdi),>t0=%rsi movq 160( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 168 ] # asm 1: movq 168(<input_0=int64#1),>t1=int64#3 # asm 2: movq 168(<input_0=%rdi),>t1=%rdx movq 168( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 144 ] # asm 1: movq 144(<input_0=int64#1),>t0=int64#2 # asm 2: movq 144(<input_0=%rdi),>t0=%rsi movq 144( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 152 ] # asm 1: movq 152(<input_0=int64#1),>t1=int64#3 # asm 2: movq 152(<input_0=%rdi),>t1=%rdx movq 152( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor 
<t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 128 ] # asm 1: movq 128(<input_0=int64#1),>t0=int64#2 # asm 2: movq 128(<input_0=%rdi),>t0=%rsi movq 128( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 136 ] # asm 1: movq 136(<input_0=int64#1),>t1=int64#3 # asm 2: movq 136(<input_0=%rdi),>t1=%rdx movq 136( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 112 ] # asm 1: movq 112(<input_0=int64#1),>t0=int64#2 # asm 2: movq 112(<input_0=%rdi),>t0=%rsi movq 112( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 120 ] # asm 1: movq 120(<input_0=int64#1),>t1=int64#3 # asm 2: movq 120(<input_0=%rdi),>t1=%rdx movq 120( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 96 ] # asm 1: movq 96(<input_0=int64#1),>t0=int64#2 # asm 2: movq 96(<input_0=%rdi),>t0=%rsi movq 96( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 104 ] # asm 1: movq 104(<input_0=int64#1),>t1=int64#3 # asm 2: movq 104(<input_0=%rdi),>t1=%rdx movq 104( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 80 ] # asm 1: movq 80(<input_0=int64#1),>t0=int64#2 # asm 2: movq 80(<input_0=%rdi),>t0=%rsi movq 80( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 88 ] # asm 1: movq 88(<input_0=int64#1),>t1=int64#3 # asm 2: movq 88(<input_0=%rdi),>t1=%rdx movq 88( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or 
<c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 64 ] # asm 1: movq 64(<input_0=int64#1),>t0=int64#2 # asm 2: movq 64(<input_0=%rdi),>t0=%rsi movq 64( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 72 ] # asm 1: movq 72(<input_0=int64#1),>t1=int64#3 # asm 2: movq 72(<input_0=%rdi),>t1=%rdx movq 72( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 48 ] # asm 1: movq 48(<input_0=int64#1),>t0=int64#2 # asm 2: movq 48(<input_0=%rdi),>t0=%rsi movq 48( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 56 ] # asm 1: movq 56(<input_0=int64#1),>t1=int64#3 # asm 2: movq 56(<input_0=%rdi),>t1=%rdx movq 56( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 32 ] # asm 1: movq 32(<input_0=int64#1),>t0=int64#2 # asm 2: movq 32(<input_0=%rdi),>t0=%rsi movq 32( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 40 ] # asm 1: movq 40(<input_0=int64#1),>t1=int64#3 # asm 2: movq 40(<input_0=%rdi),>t1=%rdx movq 40( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 16 ] # asm 1: movq 16(<input_0=int64#1),>t0=int64#2 # asm 2: movq 16(<input_0=%rdi),>t0=%rsi movq 16( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 24 ] # asm 1: movq 24(<input_0=int64#1),>t1=int64#3 # asm 2: movq 24(<input_0=%rdi),>t1=%rdx movq 24( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>t0=int64#2 # asm 2: movq 0(<input_0=%rdi),>t0=%rsi movq 0( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>t1=int64#1 # asm 2: movq 8(<input_0=%rdi),>t1=%rdi movq 8( % rdi), % rdi # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#1,<t0=int64#2 # asm 2: xor 
<t1=%rdi,<t0=%rsi xor % rdi, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#1 # asm 2: popcnt <t0=%rsi, >c=%rdi popcnt % rsi, % rdi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#1d # asm 2: and $1,<c=%edi and $1, % edi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#1,<r=int64#7 # asm 2: or <c=%rdi,<r=%rax or % rdi, % rax # qhasm: return r add % r11, % rsp ret
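vec_reduce_asm condenses a 13-limb vector (128 bits per limb) into a 13-bit value: for each limb it XORs the two 64-bit halves, takes the parity of the result with popcnt, and shifts that bit into the accumulator, working from offset 192 down to 0 so that limb i ends up in bit i of the return value. A minimal C equivalent, assuming the same layout (26 little-endian 64-bit words) and using the GCC/Clang popcount builtin; the name vec_reduce_ref is illustrative:

#include <stdint.h>

/* v points at 13 limbs of two 64-bit words each; limb i contributes bit i. */
static uint64_t vec_reduce_ref(const uint64_t *v)
{
    uint64_t r = 0;
    for (int i = 12; i >= 0; i--) {
        uint64_t t = v[2 * i] ^ v[2 * i + 1];
        r = (r << 1) | (uint64_t)(__builtin_popcountll(t) & 1);
    }
    return r;
}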
mktmansour/MKT-KSA-Geolocation-Security
22,917
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128/avx2/syndrome_asm.S
#include "namespace.h" #define syndrome_asm CRYPTO_NAMESPACE(syndrome_asm) #define _syndrome_asm _CRYPTO_NAMESPACE(syndrome_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 b64 # qhasm: int64 synd # qhasm: int64 addr # qhasm: int64 c # qhasm: int64 c_all # qhasm: int64 row # qhasm: int64 p # qhasm: int64 e # qhasm: int64 s # qhasm: reg256 pp # qhasm: reg256 ee # qhasm: reg256 ss # qhasm: int64 buf_ptr # qhasm: stack256 buf # qhasm: enter syndrome_asm .p2align 5 .global _syndrome_asm .global syndrome_asm _syndrome_asm: syndrome_asm: mov % rsp, % r11 and $31, % r11 add $32, % r11 sub % r11, % rsp # qhasm: input_1 += 1044364 # asm 1: add $1044364,<input_1=int64#2 # asm 2: add $1044364,<input_1=%rsi add $1044364, % rsi # qhasm: buf_ptr = &buf # asm 1: leaq <buf=stack256#1,>buf_ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>buf_ptr=%rcx leaq 0( % rsp), % rcx # qhasm: row = 1664 # asm 1: mov $1664,>row=int64#5 # asm 2: mov $1664,>row=%r8 mov $1664, % r8 # qhasm: loop: ._loop: # qhasm: row -= 1 # asm 1: sub $1,<row=int64#5 # asm 2: sub $1,<row=%r8 sub $1, % r8 # qhasm: ss = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>ss=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>ss=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: ee = mem256[ input_2 + 208 ] # asm 1: vmovupd 208(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 208(<input_2=%rdx),>ee=%ymm1 vmovupd 208( % rdx), % ymm1 # qhasm: ss &= ee # asm 1: vpand <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpand <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpand % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 32(<input_1=%rsi),>pp=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 240 ] # asm 1: vmovupd 240(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 240(<input_2=%rdx),>ee=%ymm2 vmovupd 240( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 64(<input_1=%rsi),>pp=%ymm1 vmovupd 64( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 272 ] # asm 1: vmovupd 272(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 272(<input_2=%rdx),>ee=%ymm2 vmovupd 272( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 96(<input_1=%rsi),>pp=%ymm1 vmovupd 96( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 304 ] # asm 1: vmovupd 304(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 304(<input_2=%rdx),>ee=%ymm2 vmovupd 304( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % 
ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 128(<input_1=%rsi),>pp=%ymm1 vmovupd 128( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 336 ] # asm 1: vmovupd 336(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 336(<input_2=%rdx),>ee=%ymm2 vmovupd 336( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 160(<input_1=%rsi),>pp=%ymm1 vmovupd 160( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 368 ] # asm 1: vmovupd 368(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 368(<input_2=%rdx),>ee=%ymm2 vmovupd 368( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 192(<input_1=%rsi),>pp=%ymm1 vmovupd 192( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 400 ] # asm 1: vmovupd 400(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 400(<input_2=%rdx),>ee=%ymm2 vmovupd 400( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 224(<input_1=%rsi),>pp=%ymm1 vmovupd 224( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 432 ] # asm 1: vmovupd 432(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 432(<input_2=%rdx),>ee=%ymm2 vmovupd 432( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 256(<input_1=%rsi),>pp=%ymm1 vmovupd 256( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 464 ] # asm 1: vmovupd 464(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 464(<input_2=%rdx),>ee=%ymm2 vmovupd 464( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 288(<input_1=%rsi),>pp=%ymm1 vmovupd 288( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 496 ] # asm 1: vmovupd 496(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 
496(<input_2=%rdx),>ee=%ymm2 vmovupd 496( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 320(<input_1=%rsi),>pp=%ymm1 vmovupd 320( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 528 ] # asm 1: vmovupd 528(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 528(<input_2=%rdx),>ee=%ymm2 vmovupd 528( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 352(<input_1=%rsi),>pp=%ymm1 vmovupd 352( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 560 ] # asm 1: vmovupd 560(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 560(<input_2=%rdx),>ee=%ymm2 vmovupd 560( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>pp=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 592 ] # asm 1: vmovupd 592(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 592(<input_2=%rdx),>ee=%ymm2 vmovupd 592( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 416 ] # asm 1: vmovupd 416(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 416(<input_1=%rsi),>pp=%ymm1 vmovupd 416( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 624 ] # asm 1: vmovupd 624(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 624(<input_2=%rdx),>ee=%ymm2 vmovupd 624( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 448 ] # asm 1: vmovupd 448(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 448(<input_1=%rsi),>pp=%ymm1 vmovupd 448( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 656 ] # asm 1: vmovupd 656(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 656(<input_2=%rdx),>ee=%ymm2 vmovupd 656( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 480 ] # asm 1: vmovupd 480(<input_1=int64#2),>pp=reg256#2 # asm 2: 
vmovupd 480(<input_1=%rsi),>pp=%ymm1 vmovupd 480( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 688 ] # asm 1: vmovupd 688(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 688(<input_2=%rdx),>ee=%ymm2 vmovupd 688( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 512 ] # asm 1: vmovupd 512(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 512(<input_1=%rsi),>pp=%ymm1 vmovupd 512( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 720 ] # asm 1: vmovupd 720(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 720(<input_2=%rdx),>ee=%ymm2 vmovupd 720( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 544 ] # asm 1: vmovupd 544(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 544(<input_1=%rsi),>pp=%ymm1 vmovupd 544( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 752 ] # asm 1: vmovupd 752(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 752(<input_2=%rdx),>ee=%ymm2 vmovupd 752( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 576 ] # asm 1: vmovupd 576(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 576(<input_1=%rsi),>pp=%ymm1 vmovupd 576( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 784 ] # asm 1: vmovupd 784(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 784(<input_2=%rdx),>ee=%ymm2 vmovupd 784( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: buf = ss # asm 1: vmovapd <ss=reg256#1,>buf=stack256#1 # asm 2: vmovapd <ss=%ymm0,>buf=0(%rsp) vmovapd % ymm0, 0( % rsp) # qhasm: s = mem64[input_1 + 608] # asm 1: movq 608(<input_1=int64#2),>s=int64#6 # asm 2: movq 608(<input_1=%rsi),>s=%r9 movq 608( % rsi), % r9 # qhasm: e = mem64[input_2 + 816] # asm 1: movq 816(<input_2=int64#3),>e=int64#7 # asm 2: movq 816(<input_2=%rdx),>e=%rax movq 816( % rdx), % rax # qhasm: s &= e # asm 1: and <e=int64#7,<s=int64#6 # asm 2: and <e=%rax,<s=%r9 and % rax, % r9 # qhasm: p = mem64[input_1 + 616] # asm 1: movq 616(<input_1=int64#2),>p=int64#7 # asm 2: movq 616(<input_1=%rsi),>p=%rax movq 616( % rsi), % rax # qhasm: e = mem64[input_2 + 824] # asm 1: movq 824(<input_2=int64#3),>e=int64#8 # asm 2: movq 824(<input_2=%rdx),>e=%r10 movq 824( % rdx), % r10 # qhasm: p &= e # asm 1: and <e=int64#8,<p=int64#7 # asm 2: and <e=%r10,<p=%rax and % r10, % rax # qhasm: s ^= p # asm 1: xor <p=int64#7,<s=int64#6 # asm 2: xor <p=%rax,<s=%r9 xor % rax, % r9 # qhasm: p = *(uint32 *)(input_1 + 624) # asm 1: movl 624(<input_1=int64#2),>p=int64#7d # asm 2: movl 624(<input_1=%rsi),>p=%eax movl 624( % rsi), % eax # qhasm: e = *(uint32 
*)(input_2 + 832) # asm 1: movl 832(<input_2=int64#3),>e=int64#8d # asm 2: movl 832(<input_2=%rdx),>e=%r10d movl 832( % rdx), % r10d # qhasm: p &= e # asm 1: and <e=int64#8,<p=int64#7 # asm 2: and <e=%r10,<p=%rax and % r10, % rax # qhasm: s ^= p # asm 1: xor <p=int64#7,<s=int64#6 # asm 2: xor <p=%rax,<s=%r9 xor % rax, % r9 # qhasm: c_all = count(s) # asm 1: popcnt <s=int64#6, >c_all=int64#6 # asm 2: popcnt <s=%r9, >c_all=%r9 popcnt % r9, % r9 # qhasm: b64 = mem64[ buf_ptr + 0 ] # asm 1: movq 0(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 0(<buf_ptr=%rcx),>b64=%rax movq 0( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 8 ] # asm 1: movq 8(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 8(<buf_ptr=%rcx),>b64=%rax movq 8( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 16 ] # asm 1: movq 16(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 16(<buf_ptr=%rcx),>b64=%rax movq 16( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 24 ] # asm 1: movq 24(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 24(<buf_ptr=%rcx),>b64=%rax movq 24( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: addr = row # asm 1: mov <row=int64#5,>addr=int64#7 # asm 2: mov <row=%r8,>addr=%rax mov % r8, % rax # qhasm: (uint64) addr >>= 3 # asm 1: shr $3,<addr=int64#7 # asm 2: shr $3,<addr=%rax shr $3, % rax # qhasm: addr += input_0 # asm 1: add <input_0=int64#1,<addr=int64#7 # asm 2: add <input_0=%rdi,<addr=%rax add % rdi, % rax # qhasm: synd = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#7),>synd=int64#8 # asm 2: movzbq 0(<addr=%rax),>synd=%r10 movzbq 0( % rax), % r10 # qhasm: synd <<= 1 # asm 1: shl $1,<synd=int64#8 # asm 2: shl $1,<synd=%r10 shl $1, % r10 # qhasm: (uint32) c_all &= 1 # asm 1: and $1,<c_all=int64#6d # asm 2: and $1,<c_all=%r9d and $1, % r9d # qhasm: synd |= c_all # asm 1: or <c_all=int64#6,<synd=int64#8 # asm 2: or <c_all=%r9,<synd=%r10 or % r9, % r10 # qhasm: *(uint8 *) (addr + 0) = synd # asm 1: movb <synd=int64#8b,0(<addr=int64#7) # asm 2: movb <synd=%r10b,0(<addr=%rax) movb % r10b, 0( % rax) # qhasm: input_1 -= 628 # asm 1: sub $628,<input_1=int64#2 # asm 2: sub $628,<input_1=%rsi sub $628, % rsi # qhasm: =? 
row-0 # asm 1: cmp $0,<row=int64#5 # asm 2: cmp $0,<row=%r8 cmp $0, % r8 # comment:fp stack unchanged by jump # qhasm: goto loop if != jne ._loop # qhasm: ss = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 0(<input_0=%rdi),>ss=%ymm0 vmovupd 0( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 0(<input_2=%rdx),>ee=%ymm1 vmovupd 0( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 0 ] = ss # asm 1: vmovupd <ss=reg256#1,0(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,0(<input_0=%rdi) vmovupd % ymm0, 0( % rdi) # qhasm: ss = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 32(<input_0=%rdi),>ss=%ymm0 vmovupd 32( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 32(<input_2=%rdx),>ee=%ymm1 vmovupd 32( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 32 ] = ss # asm 1: vmovupd <ss=reg256#1,32(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,32(<input_0=%rdi) vmovupd % ymm0, 32( % rdi) # qhasm: ss = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 64(<input_0=%rdi),>ss=%ymm0 vmovupd 64( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 64(<input_2=%rdx),>ee=%ymm1 vmovupd 64( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 64 ] = ss # asm 1: vmovupd <ss=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: ss = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 96(<input_0=%rdi),>ss=%ymm0 vmovupd 96( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 96 ] # asm 1: vmovupd 96(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 96(<input_2=%rdx),>ee=%ymm1 vmovupd 96( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 96 ] = ss # asm 1: vmovupd <ss=reg256#1,96(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,96(<input_0=%rdi) vmovupd % ymm0, 96( % rdi) # qhasm: ss = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 128(<input_0=%rdi),>ss=%ymm0 vmovupd 128( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 128 ] # asm 1: vmovupd 128(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 128(<input_2=%rdx),>ee=%ymm1 vmovupd 128( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 128 ] = ss # asm 1: vmovupd <ss=reg256#1,128(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,128(<input_0=%rdi) vmovupd % ymm0, 128( % rdi) # qhasm: ss = mem256[ input_0 + 160 ] # asm 1: vmovupd 160(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 160(<input_0=%rdi),>ss=%ymm0 vmovupd 160( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 160 ] # asm 1: vmovupd 160(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 
160(<input_2=%rdx),>ee=%ymm1 vmovupd 160( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 160 ] = ss # asm 1: vmovupd <ss=reg256#1,160(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,160(<input_0=%rdi) vmovupd % ymm0, 160( % rdi) # qhasm: s = mem64[ input_0 + 192 ] # asm 1: movq 192(<input_0=int64#1),>s=int64#2 # asm 2: movq 192(<input_0=%rdi),>s=%rsi movq 192( % rdi), % rsi # qhasm: e = mem64[ input_2 + 192 ] # asm 1: movq 192(<input_2=int64#3),>e=int64#4 # asm 2: movq 192(<input_2=%rdx),>e=%rcx movq 192( % rdx), % rcx # qhasm: s ^= e # asm 1: xor <e=int64#4,<s=int64#2 # asm 2: xor <e=%rcx,<s=%rsi xor % rcx, % rsi # qhasm: mem64[ input_0 + 192 ] = s # asm 1: movq <s=int64#2,192(<input_0=int64#1) # asm 2: movq <s=%rsi,192(<input_0=%rdi) movq % rsi, 192( % rdi) # qhasm: s = mem64[ input_0 + 200 ] # asm 1: movq 200(<input_0=int64#1),>s=int64#2 # asm 2: movq 200(<input_0=%rdi),>s=%rsi movq 200( % rdi), % rsi # qhasm: e = mem64[ input_2 + 200 ] # asm 1: movq 200(<input_2=int64#3),>e=int64#3 # asm 2: movq 200(<input_2=%rdx),>e=%rdx movq 200( % rdx), % rdx # qhasm: s ^= e # asm 1: xor <e=int64#3,<s=int64#2 # asm 2: xor <e=%rdx,<s=%rsi xor % rdx, % rsi # qhasm: mem64[ input_0 + 200 ] = s # asm 1: movq <s=int64#2,200(<input_0=int64#1) # asm 2: movq <s=%rsi,200(<input_0=%rdi) movq % rsi, 200( % rdi) # qhasm: return add % r11, % rsp ret
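syndrome_asm walks 1664 matrix rows of 628 bytes each (starting at offset 1044364 = 1663 * 628 and stepping down), ANDs each row against the tail of the error vector beginning at byte 208, reduces the result to a single parity bit via popcnt, and packs the bits eight per output byte; it finishes by XORing the first 208 bytes of the error vector into the syndrome. A rough C sketch under those layout assumptions, with illustrative names and a zero-initialized output buffer assumed (the assembly instead shifts stale byte contents out as it packs):

#include <stdint.h>
#include <stddef.h>

/* s: 208-byte syndrome, pre-zeroed; rows: 1664 rows of 628 bytes;
   e: error vector, at least 836 bytes. */
static void syndrome_ref(uint8_t *s, const uint8_t *rows, const uint8_t *e)
{
    for (int row = 0; row < 1664; row++) {
        const uint8_t *r = rows + (size_t)row * 628;
        uint8_t acc = 0;
        for (int i = 0; i < 628; i++)       /* row AND error-vector tail */
            acc ^= (uint8_t)(r[i] & e[208 + i]);
        acc ^= acc >> 4;                    /* parity of the folded byte */
        acc ^= acc >> 2;
        acc ^= acc >> 1;
        s[row >> 3] ^= (uint8_t)((acc & 1) << (row & 7));
    }
    for (int i = 0; i < 208; i++)           /* fold in the identity part of e */
        s[i] ^= e[i];
}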
mktmansour/MKT-KSA-Geolocation-Security
254,430
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128/avx2/transpose_64x128_sp_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x128_sp_asm CRYPTO_NAMESPACE(transpose_64x128_sp_asm) #define _transpose_64x128_sp_asm _CRYPTO_NAMESPACE(transpose_64x128_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 x0 # qhasm: reg128 x1 # qhasm: reg128 x2 # qhasm: reg128 x3 # qhasm: reg128 x4 # qhasm: reg128 x5 # qhasm: reg128 x6 # qhasm: reg128 x7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x128_sp_asm .p2align 5 .global _transpose_64x128_sp_asm .global transpose_64x128_sp_asm _transpose_64x128_sp_asm: transpose_64x128_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: x0 = mem128[ input_0 + 0 ] # asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6 movdqu 0( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 128 ] # asm 1: movdqu 128(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 128(<input_0=%rdi),>x1=%xmm7 movdqu 128( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 256 ] # asm 1: movdqu 
256(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 256(<input_0=%rdi),>x2=%xmm8 movdqu 256( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 384 ] # asm 1: movdqu 384(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 384(<input_0=%rdi),>x3=%xmm9 movdqu 384( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 512 ] # asm 1: movdqu 512(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 512(<input_0=%rdi),>x4=%xmm10 movdqu 512( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 640(<input_0=%rdi),>x5=%xmm11 movdqu 640( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 768(<input_0=%rdi),>x6=%xmm12 movdqu 768( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 896(<input_0=%rdi),>x7=%xmm13 movdqu 896( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor 
vpor %xmm15, %xmm11, %xmm11  # qhasm: x2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8    # qhasm: x6 = v01 | v11
vpand %xmm0, %xmm9, %xmm12   # qhasm: v00 = x3 & mask0
vpsllq $32, %xmm13, %xmm15   # qhasm: 2x v10 = x7 << 32
vpsrlq $32, %xmm9, %xmm9     # qhasm: 2x v01 = x3 unsigned>> 32
vpand %xmm1, %xmm13, %xmm13  # qhasm: v11 = x7 & mask1
vpor %xmm15, %xmm12, %xmm12  # qhasm: x3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9    # qhasm: x7 = v01 | v11
vpand %xmm2, %xmm14, %xmm13  # qhasm: v00 = x0 & mask2
vpslld $16, %xmm11, %xmm15   # qhasm: 4x v10 = x2 << 16
vpsrld $16, %xmm14, %xmm14   # qhasm: 4x v01 = x0 unsigned>> 16
vpand %xmm3, %xmm11, %xmm11  # qhasm: v11 = x2 & mask3
vpor %xmm15, %xmm13, %xmm13  # qhasm: x0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11  # qhasm: x2 = v01 | v11
vpand %xmm2, %xmm10, %xmm14  # qhasm: v00 = x1 & mask2
vpslld $16, %xmm12, %xmm15   # qhasm: 4x v10 = x3 << 16
vpsrld $16, %xmm10, %xmm10   # qhasm: 4x v01 = x1 unsigned>> 16
vpand %xmm3, %xmm12, %xmm12  # qhasm: v11 = x3 & mask3
vpor %xmm15, %xmm14, %xmm14  # qhasm: x1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10  # qhasm: x3 = v01 | v11
vpand %xmm2, %xmm6, %xmm12   # qhasm: v00 = x4 & mask2
vpslld $16, %xmm8, %xmm15    # qhasm: 4x v10 = x6 << 16
vpsrld $16, %xmm6, %xmm6     # qhasm: 4x v01 = x4 unsigned>> 16
vpand %xmm3, %xmm8, %xmm8    # qhasm: v11 = x6 & mask3
vpor %xmm15, %xmm12, %xmm12  # qhasm: x4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6     # qhasm: x6 = v01 | v11
vpand %xmm2, %xmm7, %xmm8    # qhasm: v00 = x5 & mask2
vpslld $16, %xmm9, %xmm15    # qhasm: 4x v10 = x7 << 16
vpsrld $16, %xmm7, %xmm7     # qhasm: 4x v01 = x5 unsigned>> 16
vpand %xmm3, %xmm9, %xmm9    # qhasm: v11 = x7 & mask3
vpor %xmm15, %xmm8, %xmm8    # qhasm: x5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7     # qhasm: x7 = v01 | v11
vpand %xmm4, %xmm13, %xmm9   # qhasm: v00 = x0 & mask4
vpsllw $8, %xmm14, %xmm15    # qhasm: 8x v10 = x1 << 8
vpsrlw $8, %xmm13, %xmm13    # qhasm: 8x v01 = x0 unsigned>> 8
vpand %xmm5, %xmm14, %xmm14  # qhasm: v11 = x1 & mask5
vpor %xmm15, %xmm9, %xmm9    # qhasm: x0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13  # qhasm: x1 = v01 | v11
vpand %xmm4, %xmm11, %xmm14  # qhasm: v00 = x2 & mask4
vpsllw $8, %xmm10, %xmm15    # qhasm: 8x v10 = x3 << 8
vpsrlw $8, %xmm11, %xmm11    # qhasm: 8x v01 = x2 unsigned>> 8
vpand %xmm5, %xmm10, %xmm10  # qhasm: v11 = x3 & mask5
vpor %xmm15, %xmm14, %xmm14  # qhasm: x2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10  # qhasm: x3 = v01 | v11
vpand %xmm4, %xmm12, %xmm11  # qhasm: v00 = x4 & mask4
vpsllw $8, %xmm8, %xmm15     # qhasm: 8x v10 = x5 << 8
vpsrlw $8, %xmm12, %xmm12    # qhasm: 8x v01 = x4 unsigned>> 8
vpand %xmm5, %xmm8, %xmm8    # qhasm: v11 = x5 & mask5
vpor %xmm15, %xmm11, %xmm11  # qhasm: x4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8    # qhasm: x5 = v01 | v11
vpand %xmm4, %xmm6, %xmm12   # qhasm: v00 = x6 & mask4
vpsllw $8, %xmm7, %xmm15     # qhasm: 8x v10 = x7 << 8
vpsrlw $8, %xmm6, %xmm6      # qhasm: 8x v01 = x6 unsigned>> 8
vpand %xmm5, %xmm7, %xmm7    # qhasm: v11 = x7 & mask5
vpor %xmm15, %xmm12, %xmm12  # qhasm: x6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6     # qhasm: x7 = v01 | v11
movdqu %xmm9, 0(%rdi)        # qhasm: mem128[ input_0 + 0 ] = x0
movdqu %xmm13, 128(%rdi)     # qhasm: mem128[ input_0 + 128 ] = x1
movdqu %xmm14, 256(%rdi)     # qhasm: mem128[ input_0 + 256 ] = x2
movdqu %xmm10, 384(%rdi)     # qhasm: mem128[ input_0 + 384 ] = x3
movdqu %xmm11, 512(%rdi)     # qhasm: mem128[ input_0 + 512 ] = x4
movdqu %xmm8, 640(%rdi)      # qhasm: mem128[ input_0 + 640 ] = x5
movdqu %xmm12, 768(%rdi)     # qhasm: mem128[ input_0 + 768 ] = x6
movdqu %xmm6, 896(%rdi)      # qhasm: mem128[ input_0 + 896 ] = x7
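# The pass above is one column step of what appears to be a bitwise
# transpose network: mask0/mask1 exchange 32-bit halves (vpsllq/vpsrlq),
# mask2/mask3 exchange 16-bit halves (vpslld/vpsrld), and mask4/mask5
# exchange bytes (vpsllw/vpsrlw). Each 6-instruction group presumably
# matches this C-intrinsics sketch (names v00/v10/v01/v11 follow the
# qhasm comments; a/b stand for the register pair being swapped):
#   v00 = _mm_and_si128(mask0, a);   /* half-lanes of a that stay put */
#   v10 = _mm_slli_epi64(b, 32);     /* low halves of b move up       */
#   v01 = _mm_srli_epi64(a, 32);     /* high halves of a move down    */
#   v11 = _mm_and_si128(mask1, b);   /* half-lanes of b that stay put */
#   a   = _mm_or_si128(v00, v10);
#   b   = _mm_or_si128(v01, v11);
# The same pass now repeats for the 16-byte column at offset 16.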
movdqu 16(%rdi), %xmm6       # qhasm: x0 = mem128[ input_0 + 16 ]
movdqu 144(%rdi), %xmm7      # qhasm: x1 = mem128[ input_0 + 144 ]
movdqu 272(%rdi), %xmm8      # qhasm: x2 = mem128[ input_0 + 272 ]
movdqu 400(%rdi), %xmm9      # qhasm: x3 = mem128[ input_0 + 400 ]
movdqu 528(%rdi), %xmm10     # qhasm: x4 = mem128[ input_0 + 528 ]
movdqu 656(%rdi), %xmm11     # qhasm: x5 = mem128[ input_0 + 656 ]
movdqu 784(%rdi), %xmm12     # qhasm: x6 = mem128[ input_0 + 784 ]
movdqu 912(%rdi), %xmm13     # qhasm: x7 = mem128[ input_0 + 912 ]
vpand %xmm0, %xmm6, %xmm14   # qhasm: v00 = x0 & mask0
vpsllq $32, %xmm10, %xmm15   # qhasm: 2x v10 = x4 << 32
vpsrlq $32, %xmm6, %xmm6     # qhasm: 2x v01 = x0 unsigned>> 32
vpand %xmm1, %xmm10, %xmm10  # qhasm: v11 = x4 & mask1
vpor %xmm15, %xmm14, %xmm14  # qhasm: x0 = v00 | v10
vpor %xmm10, %xmm6, %xmm6    # qhasm: x4 = v01 | v11
vpand %xmm0, %xmm7, %xmm10   # qhasm: v00 = x1 & mask0
vpsllq $32, %xmm11, %xmm15   # qhasm: 2x v10 = x5 << 32
vpsrlq $32, %xmm7, %xmm7     # qhasm: 2x v01 = x1 unsigned>> 32
vpand %xmm1, %xmm11, %xmm11  # qhasm: v11 = x5 & mask1
vpor %xmm15, %xmm10, %xmm10  # qhasm: x1 = v00 | v10
vpor %xmm11, %xmm7, %xmm7    # qhasm: x5 = v01 | v11
vpand %xmm0, %xmm8, %xmm11   # qhasm: v00 = x2 & mask0
vpsllq $32, %xmm12, %xmm15   # qhasm: 2x v10 = x6 << 32
vpsrlq $32, %xmm8, %xmm8     # qhasm: 2x v01 = x2 unsigned>> 32
vpand %xmm1, %xmm12, %xmm12  # qhasm: v11 = x6 & mask1
vpor %xmm15, %xmm11, %xmm11  # qhasm: x2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8    # qhasm: x6 = v01 | v11
vpand %xmm0, %xmm9, %xmm12   # qhasm: v00 = x3 & mask0
vpsllq $32, %xmm13, %xmm15   # qhasm: 2x v10 = x7 << 32
vpsrlq $32, %xmm9, %xmm9     # qhasm: 2x v01 = x3 unsigned>> 32
vpand %xmm1, %xmm13, %xmm13  # qhasm: v11 = x7 & mask1
vpor %xmm15, %xmm12, %xmm12  # qhasm: x3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9    # qhasm: x7 = v01 | v11
vpand %xmm2, %xmm14, %xmm13  # qhasm: v00 = x0 & mask2
vpslld $16, %xmm11, %xmm15   # qhasm: 4x v10 = x2 << 16
vpsrld $16, %xmm14, %xmm14   # qhasm: 4x v01 = x0 unsigned>> 16
vpand %xmm3, %xmm11, %xmm11  # qhasm: v11 = x2 & mask3
vpor %xmm15, %xmm13, %xmm13  # qhasm: x0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11  # qhasm: x2 = v01 | v11
vpand %xmm2, %xmm10, %xmm14  # qhasm: v00 = x1 & mask2
vpslld $16, %xmm12, %xmm15   # qhasm: 4x v10 = x3 << 16
vpsrld $16, %xmm10, %xmm10   # qhasm: 4x v01 = x1 unsigned>> 16
vpand %xmm3, %xmm12, %xmm12  # qhasm: v11 = x3 & mask3
vpor %xmm15, %xmm14, %xmm14  # qhasm: x1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10  # qhasm: x3 = v01 | v11
vpand %xmm2, %xmm6, %xmm12   # qhasm: v00 = x4 & mask2
vpslld $16, %xmm8, %xmm15    # qhasm: 4x v10 = x6 << 16
vpsrld $16, %xmm6, %xmm6     # qhasm: 4x v01 = x4 unsigned>> 16
vpand %xmm3, %xmm8, %xmm8    # qhasm: v11 = x6 & mask3
vpor %xmm15, %xmm12, %xmm12  # qhasm: x4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6     # qhasm: x6 = v01 | v11
vpand %xmm2, %xmm7, %xmm8    # qhasm: v00 = x5 & mask2
vpslld $16, %xmm9, %xmm15    # qhasm: 4x v10 = x7 << 16
vpsrld $16, %xmm7, %xmm7     # qhasm: 4x v01 = x5 unsigned>> 16
vpand %xmm3, %xmm9, %xmm9    # qhasm: v11 = x7 & mask3
vpor %xmm15, %xmm8, %xmm8    # qhasm: x5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7     # qhasm: x7 = v01 | v11
vpand %xmm4, %xmm13, %xmm9   # qhasm: v00 = x0 & mask4
vpsllw $8, %xmm14, %xmm15    # qhasm: 8x v10 = x1 << 8
vpsrlw $8, %xmm13, %xmm13    # qhasm: 8x v01 = x0 unsigned>> 8
vpand %xmm5, %xmm14, %xmm14  # qhasm: v11 = x1 & mask5
vpor %xmm15, %xmm9, %xmm9    # qhasm: x0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13  # qhasm: x1 = v01 | v11
vpand %xmm4, %xmm11, %xmm14  # qhasm: v00 = x2 & mask4
vpsllw $8, %xmm10, %xmm15    # qhasm: 8x v10 = x3 << 8
vpsrlw $8, %xmm11, %xmm11    # qhasm: 8x v01 = x2 unsigned>> 8
vpand %xmm5, %xmm10, %xmm10  # qhasm: v11 = x3 & mask5
vpor %xmm15, %xmm14, %xmm14  # qhasm: x2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10  # qhasm: x3 = v01 | v11
vpand %xmm4, %xmm12, %xmm11  # qhasm: v00 = x4 & mask4
vpsllw $8, %xmm8, %xmm15     # qhasm: 8x v10 = x5 << 8
vpsrlw $8, %xmm12, %xmm12    # qhasm: 8x v01 = x4 unsigned>> 8
vpand %xmm5, %xmm8, %xmm8    # qhasm: v11 = x5 & mask5
vpor %xmm15, %xmm11, %xmm11  # qhasm: x4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8    # qhasm: x5 = v01 | v11
vpand %xmm4, %xmm6, %xmm12   # qhasm: v00 = x6 & mask4
vpsllw $8, %xmm7, %xmm15     # qhasm: 8x v10 = x7 << 8
vpsrlw $8, %xmm6, %xmm6      # qhasm: 8x v01 = x6 unsigned>> 8
vpand %xmm5, %xmm7, %xmm7    # qhasm: v11 = x7 & mask5
vpor %xmm15, %xmm12, %xmm12  # qhasm: x6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6     # qhasm: x7 = v01 | v11
movdqu %xmm9, 16(%rdi)       # qhasm: mem128[ input_0 + 16 ] = x0
movdqu %xmm13, 144(%rdi)     # qhasm: mem128[ input_0 + 144 ] = x1
movdqu %xmm14, 272(%rdi)     # qhasm: mem128[ input_0 + 272 ] = x2
movdqu %xmm10, 400(%rdi)     # qhasm: mem128[ input_0 + 400 ] = x3
movdqu %xmm11, 528(%rdi)     # qhasm: mem128[ input_0 + 528 ] = x4
movdqu %xmm8, 656(%rdi)      # qhasm: mem128[ input_0 + 656 ] = x5
movdqu %xmm12, 784(%rdi)     # qhasm: mem128[ input_0 + 784 ] = x6
movdqu %xmm6, 912(%rdi)      # qhasm: mem128[ input_0 + 912 ] = x7
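# Next pass: the column at byte offset 32. The eight rows x0..x7 sit
# 128 bytes apart, so every pass loads one 16-byte column out of each
# row, runs the three swap stages, and writes the column back in place.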
movdqu 32(%rdi), %xmm6       # qhasm: x0 = mem128[ input_0 + 32 ]
movdqu 160(%rdi), %xmm7      # qhasm: x1 = mem128[ input_0 + 160 ]
movdqu 288(%rdi), %xmm8      # qhasm: x2 = mem128[ input_0 + 288 ]
movdqu 416(%rdi), %xmm9      # qhasm: x3 = mem128[ input_0 + 416 ]
movdqu 544(%rdi), %xmm10     # qhasm: x4 = mem128[ input_0 + 544 ]
movdqu 672(%rdi), %xmm11     # qhasm: x5 = mem128[ input_0 + 672 ]
movdqu 800(%rdi), %xmm12     # qhasm: x6 = mem128[ input_0 + 800 ]
movdqu 928(%rdi), %xmm13     # qhasm: x7 = mem128[ input_0 + 928 ]
vpand %xmm0, %xmm6, %xmm14   # qhasm: v00 = x0 & mask0
vpsllq $32, %xmm10, %xmm15   # qhasm: 2x v10 = x4 << 32
vpsrlq $32, %xmm6, %xmm6     # qhasm: 2x v01 = x0 unsigned>> 32
vpand %xmm1, %xmm10, %xmm10  # qhasm: v11 = x4 & mask1
vpor %xmm15, %xmm14, %xmm14  # qhasm: x0 = v00 | v10
vpor %xmm10, %xmm6, %xmm6    # qhasm: x4 = v01 | v11
vpand %xmm0, %xmm7, %xmm10   # qhasm: v00 = x1 & mask0
vpsllq $32, %xmm11, %xmm15   # qhasm: 2x v10 = x5 << 32
vpsrlq $32, %xmm7, %xmm7     # qhasm: 2x v01 = x1 unsigned>> 32
vpand %xmm1, %xmm11, %xmm11  # qhasm: v11 = x5 & mask1
vpor %xmm15, %xmm10, %xmm10  # qhasm: x1 = v00 | v10
vpor %xmm11, %xmm7, %xmm7    # qhasm: x5 = v01 | v11
vpand %xmm0, %xmm8, %xmm11   # qhasm: v00 = x2 & mask0
vpsllq $32, %xmm12, %xmm15   # qhasm: 2x v10 = x6 << 32
vpsrlq $32, %xmm8, %xmm8     # qhasm: 2x v01 = x2 unsigned>> 32
vpand %xmm1, %xmm12, %xmm12  # qhasm: v11 = x6 & mask1
vpor %xmm15, %xmm11, %xmm11  # qhasm: x2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8    # qhasm: x6 = v01 | v11
vpand %xmm0, %xmm9, %xmm12   # qhasm: v00 = x3 & mask0
vpsllq $32, %xmm13, %xmm15   # qhasm: 2x v10 = x7 << 32
vpsrlq $32, %xmm9, %xmm9     # qhasm: 2x v01 = x3 unsigned>> 32
vpand %xmm1, %xmm13, %xmm13  # qhasm: v11 = x7 & mask1
vpor %xmm15, %xmm12, %xmm12  # qhasm: x3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9    # qhasm: x7 = v01 | v11
vpand %xmm2, %xmm14, %xmm13  # qhasm: v00 = x0 & mask2
vpslld $16, %xmm11, %xmm15   # qhasm: 4x v10 = x2 << 16
vpsrld $16, %xmm14, %xmm14   # qhasm: 4x v01 = x0 unsigned>> 16
vpand %xmm3, %xmm11, %xmm11  # qhasm: v11 = x2 & mask3
vpor %xmm15, %xmm13, %xmm13  # qhasm: x0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11  # qhasm: x2 = v01 | v11
vpand %xmm2, %xmm10, %xmm14  # qhasm: v00 = x1 & mask2
vpslld $16, %xmm12, %xmm15   # qhasm: 4x v10 = x3 << 16
vpsrld $16, %xmm10, %xmm10   # qhasm: 4x v01 = x1 unsigned>> 16
vpand %xmm3, %xmm12, %xmm12  # qhasm: v11 = x3 & mask3
vpor %xmm15, %xmm14, %xmm14  # qhasm: x1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10  # qhasm: x3 = v01 | v11
vpand %xmm2, %xmm6, %xmm12   # qhasm: v00 = x4 & mask2
vpslld $16, %xmm8, %xmm15    # qhasm: 4x v10 = x6 << 16
vpsrld $16, %xmm6, %xmm6     # qhasm: 4x v01 = x4 unsigned>> 16
vpand %xmm3, %xmm8, %xmm8    # qhasm: v11 = x6 & mask3
vpor %xmm15, %xmm12, %xmm12  # qhasm: x4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6     # qhasm: x6 = v01 | v11
vpand %xmm2, %xmm7, %xmm8    # qhasm: v00 = x5 & mask2
vpslld $16, %xmm9, %xmm15    # qhasm: 4x v10 = x7 << 16
vpsrld $16, %xmm7, %xmm7     # qhasm: 4x v01 = x5 unsigned>> 16
vpand %xmm3, %xmm9, %xmm9    # qhasm: v11 = x7 & mask3
vpor %xmm15, %xmm8, %xmm8    # qhasm: x5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7     # qhasm: x7 = v01 | v11
vpand %xmm4, %xmm13, %xmm9   # qhasm: v00 = x0 & mask4
vpsllw $8, %xmm14, %xmm15    # qhasm: 8x v10 = x1 << 8
vpsrlw $8, %xmm13, %xmm13    # qhasm: 8x v01 = x0 unsigned>> 8
vpand %xmm5, %xmm14, %xmm14  # qhasm: v11 = x1 & mask5
vpor %xmm15, %xmm9, %xmm9    # qhasm: x0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13  # qhasm: x1 = v01 | v11
vpand %xmm4, %xmm11, %xmm14  # qhasm: v00 = x2 & mask4
vpsllw $8, %xmm10, %xmm15    # qhasm: 8x v10 = x3 << 8
vpsrlw $8, %xmm11, %xmm11    # qhasm: 8x v01 = x2 unsigned>> 8
vpand %xmm5, %xmm10, %xmm10  # qhasm: v11 = x3 & mask5
vpor %xmm15, %xmm14, %xmm14  # qhasm: x2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10  # qhasm: x3 = v01 | v11
vpand %xmm4, %xmm12, %xmm11  # qhasm: v00 = x4 & mask4
vpsllw $8, %xmm8, %xmm15     # qhasm: 8x v10 = x5 << 8
vpsrlw $8, %xmm12, %xmm12    # qhasm: 8x v01 = x4 unsigned>> 8
vpand %xmm5, %xmm8, %xmm8    # qhasm: v11 = x5 & mask5
vpor %xmm15, %xmm11, %xmm11  # qhasm: x4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8    # qhasm: x5 = v01 | v11
vpand %xmm4, %xmm6, %xmm12   # qhasm: v00 = x6 & mask4
vpsllw $8, %xmm7, %xmm15     # qhasm: 8x v10 = x7 << 8
vpsrlw $8, %xmm6, %xmm6      # qhasm: 8x v01 = x6 unsigned>> 8
vpand %xmm5, %xmm7, %xmm7    # qhasm: v11 = x7 & mask5
vpor %xmm15, %xmm12, %xmm12  # qhasm: x6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6     # qhasm: x7 = v01 | v11
movdqu %xmm9, 32(%rdi)       # qhasm: mem128[ input_0 + 32 ] = x0
movdqu %xmm13, 160(%rdi)     # qhasm: mem128[ input_0 + 160 ] = x1
movdqu %xmm14, 288(%rdi)     # qhasm: mem128[ input_0 + 288 ] = x2
movdqu %xmm10, 416(%rdi)     # qhasm: mem128[ input_0 + 416 ] = x3
movdqu %xmm11, 544(%rdi)     # qhasm: mem128[ input_0 + 544 ] = x4
movdqu %xmm8, 672(%rdi)      # qhasm: mem128[ input_0 + 672 ] = x5
movdqu %xmm12, 800(%rdi)     # qhasm: mem128[ input_0 + 800 ] = x6
movdqu %xmm6, 928(%rdi)      # qhasm: mem128[ input_0 + 928 ] = x7
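# Column at offset 48. The generated register plan is fixed across
# passes: %xmm0..%xmm5 hold mask0..mask5 for the whole function,
# %xmm15 is the single scratch register (v10), and %xmm6..%xmm14
# rotate through x0..x7 and the v00/v01/v11 temporaries.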
movdqu 48(%rdi), %xmm6       # qhasm: x0 = mem128[ input_0 + 48 ]
movdqu 176(%rdi), %xmm7      # qhasm: x1 = mem128[ input_0 + 176 ]
movdqu 304(%rdi), %xmm8      # qhasm: x2 = mem128[ input_0 + 304 ]
movdqu 432(%rdi), %xmm9      # qhasm: x3 = mem128[ input_0 + 432 ]
movdqu 560(%rdi), %xmm10     # qhasm: x4 = mem128[ input_0 + 560 ]
movdqu 688(%rdi), %xmm11     # qhasm: x5 = mem128[ input_0 + 688 ]
movdqu 816(%rdi), %xmm12     # qhasm: x6 = mem128[ input_0 + 816 ]
movdqu 944(%rdi), %xmm13     # qhasm: x7 = mem128[ input_0 + 944 ]
vpand %xmm0, %xmm6, %xmm14   # qhasm: v00 = x0 & mask0
vpsllq $32, %xmm10, %xmm15   # qhasm: 2x v10 = x4 << 32
vpsrlq $32, %xmm6, %xmm6     # qhasm: 2x v01 = x0 unsigned>> 32
vpand %xmm1, %xmm10, %xmm10  # qhasm: v11 = x4 & mask1
vpor %xmm15, %xmm14, %xmm14  # qhasm: x0 = v00 | v10
vpor %xmm10, %xmm6, %xmm6    # qhasm: x4 = v01 | v11
vpand %xmm0, %xmm7, %xmm10   # qhasm: v00 = x1 & mask0
vpsllq $32, %xmm11, %xmm15   # qhasm: 2x v10 = x5 << 32
vpsrlq $32, %xmm7, %xmm7     # qhasm: 2x v01 = x1 unsigned>> 32
vpand %xmm1, %xmm11, %xmm11  # qhasm: v11 = x5 & mask1
vpor %xmm15, %xmm10, %xmm10  # qhasm: x1 = v00 | v10
vpor %xmm11, %xmm7, %xmm7    # qhasm: x5 = v01 | v11
vpand %xmm0, %xmm8, %xmm11   # qhasm: v00 = x2 & mask0
vpsllq $32, %xmm12, %xmm15   # qhasm: 2x v10 = x6 << 32
vpsrlq $32, %xmm8, %xmm8     # qhasm: 2x v01 = x2 unsigned>> 32
vpand %xmm1, %xmm12, %xmm12  # qhasm: v11 = x6 & mask1
vpor %xmm15, %xmm11, %xmm11  # qhasm: x2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8    # qhasm: x6 = v01 | v11
vpand %xmm0, %xmm9, %xmm12   # qhasm: v00 = x3 & mask0
vpsllq $32, %xmm13, %xmm15   # qhasm: 2x v10 = x7 << 32
vpsrlq $32, %xmm9, %xmm9     # qhasm: 2x v01 = x3 unsigned>> 32
vpand %xmm1, %xmm13, %xmm13  # qhasm: v11 = x7 & mask1
vpor %xmm15, %xmm12, %xmm12  # qhasm: x3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9    # qhasm: x7 = v01 | v11
vpand %xmm2, %xmm14, %xmm13  # qhasm: v00 = x0 & mask2
vpslld $16, %xmm11, %xmm15   # qhasm: 4x v10 = x2 << 16
vpsrld $16, %xmm14, %xmm14   # qhasm: 4x v01 = x0 unsigned>> 16
vpand %xmm3, %xmm11, %xmm11  # qhasm: v11 = x2 & mask3
vpor %xmm15, %xmm13, %xmm13  # qhasm: x0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11  # qhasm: x2 = v01 | v11
vpand %xmm2, %xmm10, %xmm14  # qhasm: v00 = x1 & mask2
vpslld $16, %xmm12, %xmm15   # qhasm: 4x v10 = x3 << 16
vpsrld $16, %xmm10, %xmm10   # qhasm: 4x v01 = x1 unsigned>> 16
vpand %xmm3, %xmm12, %xmm12  # qhasm: v11 = x3 & mask3
vpor %xmm15, %xmm14, %xmm14  # qhasm: x1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10  # qhasm: x3 = v01 | v11
vpand %xmm2, %xmm6, %xmm12   # qhasm: v00 = x4 & mask2
vpslld $16, %xmm8, %xmm15    # qhasm: 4x v10 = x6 << 16
vpsrld $16, %xmm6, %xmm6     # qhasm: 4x v01 = x4 unsigned>> 16
vpand %xmm3, %xmm8, %xmm8    # qhasm: v11 = x6 & mask3
vpor %xmm15, %xmm12, %xmm12  # qhasm: x4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6     # qhasm: x6 = v01 | v11
vpand %xmm2, %xmm7, %xmm8    # qhasm: v00 = x5 & mask2
vpslld $16, %xmm9, %xmm15    # qhasm: 4x v10 = x7 << 16
vpsrld $16, %xmm7, %xmm7     # qhasm: 4x v01 = x5 unsigned>> 16
vpand %xmm3, %xmm9, %xmm9    # qhasm: v11 = x7 & mask3
vpor %xmm15, %xmm8, %xmm8    # qhasm: x5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7     # qhasm: x7 = v01 | v11
vpand %xmm4, %xmm13, %xmm9   # qhasm: v00 = x0 & mask4
vpsllw $8, %xmm14, %xmm15    # qhasm: 8x v10 = x1 << 8
vpsrlw $8, %xmm13, %xmm13    # qhasm: 8x v01 = x0 unsigned>> 8
vpand %xmm5, %xmm14, %xmm14  # qhasm: v11 = x1 & mask5
vpor %xmm15, %xmm9, %xmm9    # qhasm: x0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13  # qhasm: x1 = v01 | v11
vpand %xmm4, %xmm11, %xmm14  # qhasm: v00 = x2 & mask4
vpsllw $8, %xmm10, %xmm15    # qhasm: 8x v10 = x3 << 8
vpsrlw $8, %xmm11, %xmm11    # qhasm: 8x v01 = x2 unsigned>> 8
vpand %xmm5, %xmm10, %xmm10  # qhasm: v11 = x3 & mask5
vpor %xmm15, %xmm14, %xmm14  # qhasm: x2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10  # qhasm: x3 = v01 | v11
vpand %xmm4, %xmm12, %xmm11  # qhasm: v00 = x4 & mask4
vpsllw $8, %xmm8, %xmm15     # qhasm: 8x v10 = x5 << 8
vpsrlw $8, %xmm12, %xmm12    # qhasm: 8x v01 = x4 unsigned>> 8
vpand %xmm5, %xmm8, %xmm8    # qhasm: v11 = x5 & mask5
vpor %xmm15, %xmm11, %xmm11  # qhasm: x4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8    # qhasm: x5 = v01 | v11
vpand %xmm4, %xmm6, %xmm12   # qhasm: v00 = x6 & mask4
vpsllw $8, %xmm7, %xmm15     # qhasm: 8x v10 = x7 << 8
vpsrlw $8, %xmm6, %xmm6      # qhasm: 8x v01 = x6 unsigned>> 8
vpand %xmm5, %xmm7, %xmm7    # qhasm: v11 = x7 & mask5
vpor %xmm15, %xmm12, %xmm12  # qhasm: x6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6     # qhasm: x7 = v01 | v11
movdqu %xmm9, 48(%rdi)       # qhasm: mem128[ input_0 + 48 ] = x0
movdqu %xmm13, 176(%rdi)     # qhasm: mem128[ input_0 + 176 ] = x1
movdqu %xmm14, 304(%rdi)     # qhasm: mem128[ input_0 + 304 ] = x2
movdqu %xmm10, 432(%rdi)     # qhasm: mem128[ input_0 + 432 ] = x3
movdqu %xmm11, 560(%rdi)     # qhasm: mem128[ input_0 + 560 ] = x4
movdqu %xmm8, 688(%rdi)      # qhasm: mem128[ input_0 + 688 ] = x5
movdqu %xmm12, 816(%rdi)     # qhasm: mem128[ input_0 + 816 ] = x6
movdqu %xmm6, 944(%rdi)      # qhasm: mem128[ input_0 + 944 ] = x7
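# Column at offset 64. Each swap group combines masked halves of two
# registers with OR (the mask pairs are presumably complementary), so a
# pass permutes bits in place and the stores simply overwrite the same
# addresses that were loaded at the top.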
movdqu 64(%rdi), %xmm6       # qhasm: x0 = mem128[ input_0 + 64 ]
movdqu 192(%rdi), %xmm7      # qhasm: x1 = mem128[ input_0 + 192 ]
movdqu 320(%rdi), %xmm8      # qhasm: x2 = mem128[ input_0 + 320 ]
movdqu 448(%rdi), %xmm9      # qhasm: x3 = mem128[ input_0 + 448 ]
movdqu 576(%rdi), %xmm10     # qhasm: x4 = mem128[ input_0 + 576 ]
movdqu 704(%rdi), %xmm11     # qhasm: x5 = mem128[ input_0 + 704 ]
movdqu 832(%rdi), %xmm12     # qhasm: x6 = mem128[ input_0 + 832 ]
movdqu 960(%rdi), %xmm13     # qhasm: x7 = mem128[ input_0 + 960 ]
vpand %xmm0, %xmm6, %xmm14   # qhasm: v00 = x0 & mask0
vpsllq $32, %xmm10, %xmm15   # qhasm: 2x v10 = x4 << 32
vpsrlq $32, %xmm6, %xmm6     # qhasm: 2x v01 = x0 unsigned>> 32
vpand %xmm1, %xmm10, %xmm10  # qhasm: v11 = x4 & mask1
vpor %xmm15, %xmm14, %xmm14  # qhasm: x0 = v00 | v10
vpor %xmm10, %xmm6, %xmm6    # qhasm: x4 = v01 | v11
vpand %xmm0, %xmm7, %xmm10   # qhasm: v00 = x1 & mask0
vpsllq $32, %xmm11, %xmm15   # qhasm: 2x v10 = x5 << 32
vpsrlq $32, %xmm7, %xmm7     # qhasm: 2x v01 = x1 unsigned>> 32
vpand %xmm1, %xmm11, %xmm11  # qhasm: v11 = x5 & mask1
vpor %xmm15, %xmm10, %xmm10  # qhasm: x1 = v00 | v10
vpor %xmm11, %xmm7, %xmm7    # qhasm: x5 = v01 | v11
vpand %xmm0, %xmm8, %xmm11   # qhasm: v00 = x2 & mask0
vpsllq $32, %xmm12, %xmm15   # qhasm: 2x v10 = x6 << 32
vpsrlq $32, %xmm8, %xmm8     # qhasm: 2x v01 = x2 unsigned>> 32
vpand %xmm1, %xmm12, %xmm12  # qhasm: v11 = x6 & mask1
vpor %xmm15, %xmm11, %xmm11  # qhasm: x2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8    # qhasm: x6 = v01 | v11
vpand %xmm0, %xmm9, %xmm12   # qhasm: v00 = x3 & mask0
vpsllq $32, %xmm13, %xmm15   # qhasm: 2x v10 = x7 << 32
vpsrlq $32, %xmm9, %xmm9     # qhasm: 2x v01 = x3 unsigned>> 32
vpand %xmm1, %xmm13, %xmm13  # qhasm: v11 = x7 & mask1
vpor %xmm15, %xmm12, %xmm12  # qhasm: x3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9    # qhasm: x7 = v01 | v11
vpand %xmm2, %xmm14, %xmm13  # qhasm: v00 = x0 & mask2
vpslld $16, %xmm11, %xmm15   # qhasm: 4x v10 = x2 << 16
vpsrld $16, %xmm14, %xmm14   # qhasm: 4x v01 = x0 unsigned>> 16
vpand %xmm3, %xmm11, %xmm11  # qhasm: v11 = x2 & mask3
vpor %xmm15, %xmm13, %xmm13  # qhasm: x0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11  # qhasm: x2 = v01 | v11
vpand %xmm2, %xmm10, %xmm14  # qhasm: v00 = x1 & mask2
vpslld $16, %xmm12, %xmm15   # qhasm: 4x v10 = x3 << 16
vpsrld $16, %xmm10, %xmm10   # qhasm: 4x v01 = x1 unsigned>> 16
vpand %xmm3, %xmm12, %xmm12  # qhasm: v11 = x3 & mask3
vpor %xmm15, %xmm14, %xmm14  # qhasm: x1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10  # qhasm: x3 = v01 | v11
vpand %xmm2, %xmm6, %xmm12   # qhasm: v00 = x4 & mask2
vpslld $16, %xmm8, %xmm15    # qhasm: 4x v10 = x6 << 16
vpsrld $16, %xmm6, %xmm6     # qhasm: 4x v01 = x4 unsigned>> 16
vpand %xmm3, %xmm8, %xmm8    # qhasm: v11 = x6 & mask3
vpor %xmm15, %xmm12, %xmm12  # qhasm: x4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6     # qhasm: x6 = v01 | v11
vpand %xmm2, %xmm7, %xmm8    # qhasm: v00 = x5 & mask2
vpslld $16, %xmm9, %xmm15    # qhasm: 4x v10 = x7 << 16
vpsrld $16, %xmm7, %xmm7     # qhasm: 4x v01 = x5 unsigned>> 16
vpand %xmm3, %xmm9, %xmm9    # qhasm: v11 = x7 & mask3
vpor %xmm15, %xmm8, %xmm8    # qhasm: x5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7     # qhasm: x7 = v01 | v11
vpand %xmm4, %xmm13, %xmm9   # qhasm: v00 = x0 & mask4
vpsllw $8, %xmm14, %xmm15    # qhasm: 8x v10 = x1 << 8
vpsrlw $8, %xmm13, %xmm13    # qhasm: 8x v01 = x0 unsigned>> 8
vpand %xmm5, %xmm14, %xmm14  # qhasm: v11 = x1 & mask5
vpor %xmm15, %xmm9, %xmm9    # qhasm: x0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13  # qhasm: x1 = v01 | v11
vpand %xmm4, %xmm11, %xmm14  # qhasm: v00 = x2 & mask4
vpsllw $8, %xmm10, %xmm15    # qhasm: 8x v10 = x3 << 8
vpsrlw $8, %xmm11, %xmm11    # qhasm: 8x v01 = x2 unsigned>> 8
vpand %xmm5, %xmm10, %xmm10  # qhasm: v11 = x3 & mask5
vpor %xmm15, %xmm14, %xmm14  # qhasm: x2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10  # qhasm: x3 = v01 | v11
vpand %xmm4, %xmm12, %xmm11  # qhasm: v00 = x4 & mask4
vpsllw $8, %xmm8, %xmm15     # qhasm: 8x v10 = x5 << 8
vpsrlw $8, %xmm12, %xmm12    # qhasm: 8x v01 = x4 unsigned>> 8
vpand %xmm5, %xmm8, %xmm8    # qhasm: v11 = x5 & mask5
vpor %xmm15, %xmm11, %xmm11  # qhasm: x4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8    # qhasm: x5 = v01 | v11
vpand %xmm4, %xmm6, %xmm12   # qhasm: v00 = x6 & mask4
vpsllw $8, %xmm7, %xmm15     # qhasm: 8x v10 = x7 << 8
vpsrlw $8, %xmm6, %xmm6      # qhasm: 8x v01 = x6 unsigned>> 8
vpand %xmm5, %xmm7, %xmm7    # qhasm: v11 = x7 & mask5
vpor %xmm15, %xmm12, %xmm12  # qhasm: x6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6     # qhasm: x7 = v01 | v11
movdqu %xmm9, 64(%rdi)       # qhasm: mem128[ input_0 + 64 ] = x0
movdqu %xmm13, 192(%rdi)     # qhasm: mem128[ input_0 + 192 ] = x1
movdqu %xmm14, 320(%rdi)     # qhasm: mem128[ input_0 + 320 ] = x2
movdqu %xmm10, 448(%rdi)     # qhasm: mem128[ input_0 + 448 ] = x3
movdqu %xmm11, 576(%rdi)     # qhasm: mem128[ input_0 + 576 ] = x4
movdqu %xmm8, 704(%rdi)      # qhasm: mem128[ input_0 + 704 ] = x5
movdqu %xmm12, 832(%rdi)     # qhasm: mem128[ input_0 + 832 ] = x6
movdqu %xmm6, 960(%rdi)      # qhasm: mem128[ input_0 + 960 ] = x7
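# Sixth column, at byte offset 80.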
xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor 
<v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand 
<mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 
unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 80 ] = x0 # asm 1: movdqu <x0=reg128#10,80(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,80(<input_0=%rdi) movdqu % xmm9, 80( % rdi) # qhasm: mem128[ input_0 + 208 ] = x1 # asm 1: movdqu <x1=reg128#14,208(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,208(<input_0=%rdi) movdqu % xmm13, 208( % rdi) # qhasm: mem128[ input_0 + 336 ] = x2 # asm 1: movdqu <x2=reg128#15,336(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,336(<input_0=%rdi) movdqu % xmm14, 336( % rdi) # qhasm: mem128[ input_0 + 464 ] = x3 # asm 1: movdqu <x3=reg128#11,464(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,464(<input_0=%rdi) movdqu % xmm10, 464( % rdi) # qhasm: mem128[ input_0 + 592 ] = x4 # asm 1: movdqu <x4=reg128#12,592(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,592(<input_0=%rdi) movdqu % xmm11, 592( % rdi) # qhasm: mem128[ input_0 + 720 ] = x5 # asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi) movdqu % xmm8, 720( % rdi) # qhasm: mem128[ input_0 + 848 ] = x6 # asm 1: movdqu <x6=reg128#13,848(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,848(<input_0=%rdi) movdqu % xmm12, 848( % rdi) # qhasm: mem128[ input_0 + 976 ] = x7 # asm 1: movdqu <x7=reg128#7,976(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,976(<input_0=%rdi) movdqu % xmm6, 976( % rdi) # qhasm: x0 = mem128[ input_0 + 96 ] # asm 1: movdqu 96(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 96(<input_0=%rdi),>x0=%xmm6 movdqu 96( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 224 ] # asm 1: movdqu 224(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 224(<input_0=%rdi),>x1=%xmm7 movdqu 224( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 352 ] # asm 1: movdqu 352(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 352(<input_0=%rdi),>x2=%xmm8 movdqu 352( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 480 ] # asm 1: movdqu 480(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 480(<input_0=%rdi),>x3=%xmm9 movdqu 480( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 608 ] # asm 1: movdqu 608(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 608(<input_0=%rdi),>x4=%xmm10 movdqu 608( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 736 ] # asm 1: movdqu 736(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 736(<input_0=%rdi),>x5=%xmm11 movdqu 736( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 864 ] # asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12 movdqu 864( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 992 ] # asm 1: movdqu 992(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 992(<input_0=%rdi),>x7=%xmm13 movdqu 992( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 
1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand 
<mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 
= v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand 
<mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 96 ] = x0 # asm 1: movdqu <x0=reg128#10,96(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,96(<input_0=%rdi) movdqu % xmm9, 96( % rdi) # qhasm: mem128[ input_0 + 224 ] = x1 # asm 1: movdqu <x1=reg128#14,224(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,224(<input_0=%rdi) movdqu % xmm13, 224( % rdi) # qhasm: mem128[ input_0 + 352 ] = x2 # asm 1: movdqu <x2=reg128#15,352(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,352(<input_0=%rdi) movdqu % xmm14, 352( % rdi) # qhasm: mem128[ input_0 + 480 ] = x3 # asm 1: movdqu <x3=reg128#11,480(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,480(<input_0=%rdi) movdqu % xmm10, 480( % rdi) # qhasm: mem128[ input_0 + 608 ] = x4 # asm 1: movdqu <x4=reg128#12,608(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,608(<input_0=%rdi) movdqu % xmm11, 608( % rdi) # qhasm: mem128[ input_0 + 736 ] = x5 # asm 1: movdqu <x5=reg128#9,736(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,736(<input_0=%rdi) movdqu % xmm8, 736( % rdi) # qhasm: mem128[ input_0 + 864 ] = x6 # asm 1: movdqu <x6=reg128#13,864(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi) movdqu % xmm12, 864( % rdi) # qhasm: mem128[ input_0 + 992 ] = x7 # asm 1: movdqu <x7=reg128#7,992(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,992(<input_0=%rdi) movdqu % xmm6, 992( % rdi) # qhasm: x0 = mem128[ input_0 + 112 ] # asm 1: movdqu 112(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 112(<input_0=%rdi),>x0=%xmm6 movdqu 112( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 240 ] # asm 1: movdqu 240(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 240(<input_0=%rdi),>x1=%xmm7 movdqu 240( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 368 ] # asm 1: movdqu 368(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 368(<input_0=%rdi),>x2=%xmm8 movdqu 368( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 496 ] # asm 1: movdqu 496(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 496(<input_0=%rdi),>x3=%xmm9 movdqu 496( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 624 ] # asm 1: movdqu 624(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 624(<input_0=%rdi),>x4=%xmm10 movdqu 624( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 752 ] # asm 1: movdqu 752(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 752(<input_0=%rdi),>x5=%xmm11 movdqu 752( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 880 ] # asm 1: movdqu 880(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 880(<input_0=%rdi),>x6=%xmm12 movdqu 880( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 1008 ] # asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13 movdqu 1008( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand 
<mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#1 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm0 vpand % xmm0, % xmm9, % xmm0 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#13 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm12 vpsllq $32, % xmm13, % xmm12 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#1,>x3=reg128#1 # asm 2: vpor <v10=%xmm12,<v00=%xmm0,>x3=%xmm0 vpor % xmm12, % xmm0, % xmm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#13 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm12 vpslld $16, % xmm11, % xmm12 # qhasm: 4x v01 = 
x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#14 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm13 vpsrld $16, % xmm14, % xmm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#1,>v10=reg128#14 # asm 2: vpslld $16,<x3=%xmm0,>v10=%xmm13 vpslld $16, % xmm0, % xmm13 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#14 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm13 vpslld $16, % xmm8, % xmm13 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#3 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm2 vpand % xmm2, % xmm7, % xmm2 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#2,>v10=reg128#9 # asm 2: vpslld $16,<x7=%xmm1,>v10=%xmm8 vpslld $16, % xmm1, % xmm8 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#9,<v00=reg128#3,>x5=reg128#3 # asm 2: vpor <v10=%xmm8,<v00=%xmm2,>x5=%xmm2 vpor % xmm8, % xmm2, % xmm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4 # asm 2: vpand 
<mask4=%xmm4,<x0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#13,>v10=reg128#8 # asm 2: vpsllw $8,<x1=%xmm12,>v10=%xmm7 vpsllw $8, % xmm12, % xmm7 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#10,>v01=reg128#9 # asm 2: vpsrlw $8,<x0=%xmm9,>v01=%xmm8 vpsrlw $8, % xmm9, % xmm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#1,>v10=reg128#10 # asm 2: vpsllw $8,<x3=%xmm0,>v10=%xmm9 vpsllw $8, % xmm0, % xmm9 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#3,>v10=reg128#12 # asm 2: vpsllw $8,<x5=%xmm2,>v10=%xmm11 vpsllw $8, % xmm2, % xmm11 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#11,>v01=reg128#11 # asm 2: vpsrlw $8,<x4=%xmm10,>v01=%xmm10 vpsrlw $8, % xmm10, % xmm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#5 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm4 vpand % xmm4, % xmm6, % xmm4 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#2,>v10=reg128#11 # asm 2: vpsllw $8,<x7=%xmm1,>v10=%xmm10 vpsllw $8, % xmm1, % xmm10 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#11,<v00=reg128#5,>x6=reg128#5 # asm 2: vpor <v10=%xmm10,<v00=%xmm4,>x6=%xmm4 vpor % xmm10, % xmm4, % xmm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2 # asm 2: vpor 
<v11=%xmm1,<v01=%xmm6,>x7=%xmm1
vpor %xmm1,%xmm6,%xmm1

# Store the eight transposed 128-bit rows of this block back out at the
# same 128-byte stride they were gathered from.

# qhasm: mem128[ input_0 + 112 ] = x0
# asm 1: movdqu <x0=reg128#4,112(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm3,112(<input_0=%rdi)
movdqu %xmm3,112(%rdi)

# qhasm: mem128[ input_0 + 240 ] = x1
# asm 1: movdqu <x1=reg128#8,240(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm7,240(<input_0=%rdi)
movdqu %xmm7,240(%rdi)

# qhasm: mem128[ input_0 + 368 ] = x2
# asm 1: movdqu <x2=reg128#9,368(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm8,368(<input_0=%rdi)
movdqu %xmm8,368(%rdi)

# qhasm: mem128[ input_0 + 496 ] = x3
# asm 1: movdqu <x3=reg128#1,496(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm0,496(<input_0=%rdi)
movdqu %xmm0,496(%rdi)

# qhasm: mem128[ input_0 + 624 ] = x4
# asm 1: movdqu <x4=reg128#10,624(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm9,624(<input_0=%rdi)
movdqu %xmm9,624(%rdi)

# qhasm: mem128[ input_0 + 752 ] = x5
# asm 1: movdqu <x5=reg128#3,752(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm2,752(<input_0=%rdi)
movdqu %xmm2,752(%rdi)

# qhasm: mem128[ input_0 + 880 ] = x6
# asm 1: movdqu <x6=reg128#5,880(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm4,880(<input_0=%rdi)
movdqu %xmm4,880(%rdi)

# qhasm: mem128[ input_0 + 1008 ] = x7
# asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi)
movdqu %xmm1,1008(%rdi)

# Switch mask sets for the next pass: the MASK2_*/MASK1_*/MASK0_* pairs from
# consts.S drive the 4-, 2- and 1-bit butterflies (psllq/psrlq $4, $2, $1)
# applied to the contiguous rows reloaded below.

# qhasm: mask0 aligned= mem128[ MASK2_0 ]
# asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0
movdqa MASK2_0(%rip),%xmm0

# qhasm: mask1 aligned= mem128[ MASK2_1 ]
# asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1
movdqa MASK2_1(%rip),%xmm1

# qhasm: mask2 aligned= mem128[ MASK1_0 ]
# asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2
movdqa MASK1_0(%rip),%xmm2

# qhasm: mask3 aligned= mem128[ MASK1_1 ]
# asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3
movdqa MASK1_1(%rip),%xmm3

# qhasm: mask4 aligned= mem128[ MASK0_0 ]
# asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4
movdqa MASK0_0(%rip),%xmm4

# qhasm: mask5 aligned= mem128[ MASK0_1 ]
# asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5
movdqa MASK0_1(%rip),%xmm5

# qhasm: x0 = mem128[ input_0 + 0 ]
# asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6
movdqu 0(%rdi),%xmm6

# qhasm: x1 = mem128[ input_0 + 16 ]
# asm 1: movdqu 16(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 16(<input_0=%rdi),>x1=%xmm7
movdqu 16(%rdi),%xmm7

# qhasm: x2 = mem128[ input_0 + 32 ]
# asm 1: movdqu 32(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 32(<input_0=%rdi),>x2=%xmm8
movdqu 32(%rdi),%xmm8

# qhasm: x3 = mem128[ input_0 + 48 ]
# asm 1: movdqu 48(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 48(<input_0=%rdi),>x3=%xmm9
movdqu 48(%rdi),%xmm9

# qhasm: x4 = mem128[ input_0 + 64 ]
# asm 1: movdqu 64(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 64(<input_0=%rdi),>x4=%xmm10
movdqu 64(%rdi),%xmm10

# qhasm: x5 = mem128[ input_0 + 80 ]
# asm 1: movdqu 80(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 80(<input_0=%rdi),>x5=%xmm11
movdqu 80(%rdi),%xmm11

# qhasm: x6 = mem128[ input_0 + 96 ]
# asm 1: movdqu 96(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 96(<input_0=%rdi),>x6=%xmm12
movdqu 96(%rdi),%xmm12

# qhasm: x7 = mem128[ input_0 + 112 ]
# asm 1: movdqu 112(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu
112(<input_0=%rdi),>x7=%xmm13 movdqu 112( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2,%xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
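# Final 1-bit stage of this pass: mask4/mask5 were loaded from
# MASK0_0/MASK0_1 above, so these psllq $1/psrlq $1 butterflies swap the
# odd bits of each register with the even bits of its partner before the
# eight rows are written back below.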
vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: 
vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 0 ] = x0 # asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi) movdqu % xmm9, 0( % rdi) # qhasm: mem128[ input_0 + 16 ] = x1 # asm 1: movdqu <x1=reg128#14,16(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,16(<input_0=%rdi) movdqu % xmm13, 16( % rdi) # qhasm: mem128[ input_0 + 32 ] = x2 # asm 1: movdqu <x2=reg128#15,32(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,32(<input_0=%rdi) movdqu % xmm14, 32( % rdi) # qhasm: mem128[ input_0 + 48 ] = x3 # asm 1: movdqu <x3=reg128#11,48(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,48(<input_0=%rdi) movdqu % xmm10, 48( % rdi) # qhasm: mem128[ input_0 + 64 ] = x4 # asm 1: movdqu <x4=reg128#12,64(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,64(<input_0=%rdi) movdqu % xmm11, 64( % rdi) # qhasm: mem128[ input_0 + 80 ] = x5 # asm 1: movdqu <x5=reg128#9,80(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,80(<input_0=%rdi) movdqu % xmm8, 80( % rdi) # qhasm: mem128[ input_0 + 96 ] = x6 # asm 1: movdqu <x6=reg128#13,96(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,96(<input_0=%rdi) movdqu % xmm12, 96( % rdi) # qhasm: mem128[ input_0 + 112 ] = x7 # asm 1: movdqu <x7=reg128#7,112(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,112(<input_0=%rdi) movdqu % xmm6, 112( % rdi) # qhasm: x0 = mem128[ input_0 + 128 ] # asm 1: movdqu 128(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 128(<input_0=%rdi),>x0=%xmm6 movdqu 128( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 144 ] # asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7 movdqu 144( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 160 ] # asm 1: movdqu 160(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 160(<input_0=%rdi),>x2=%xmm8 movdqu 160( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 176 ] # asm 1: movdqu 176(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 176(<input_0=%rdi),>x3=%xmm9 movdqu 176( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 192 ] # asm 1: movdqu 192(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 192(<input_0=%rdi),>x4=%xmm10 movdqu 192( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 208 ] # asm 1: movdqu 208(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 208(<input_0=%rdi),>x5=%xmm11 movdqu 208( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 224 ] # asm 1: movdqu 224(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 224(<input_0=%rdi),>x6=%xmm12 movdqu 224( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 240 ] # asm 1: movdqu 240(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 240(<input_0=%rdi),>x7=%xmm13 movdqu 240( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, 
movdqu 128(%rdi),%xmm6        # x0 = mem128[ input_0 + 128 ]
movdqu 144(%rdi),%xmm7        # x1 = mem128[ input_0 + 144 ]
movdqu 160(%rdi),%xmm8        # x2 = mem128[ input_0 + 160 ]
movdqu 176(%rdi),%xmm9        # x3 = mem128[ input_0 + 176 ]
movdqu 192(%rdi),%xmm10       # x4 = mem128[ input_0 + 192 ]
movdqu 208(%rdi),%xmm11       # x5 = mem128[ input_0 + 208 ]
movdqu 224(%rdi),%xmm12       # x6 = mem128[ input_0 + 224 ]
movdqu 240(%rdi),%xmm13       # x7 = mem128[ input_0 + 240 ]

vpand %xmm0,%xmm6,%xmm14      # v00 = x0 & mask0
vpand %xmm0,%xmm10,%xmm15     # v10 = x4 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm6,%xmm6       # v01 = x0 & mask1
vpand %xmm1,%xmm10,%xmm10     # v11 = x4 & mask1
psrlq $4,%xmm6                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm14,%xmm14     # x0 = v00 | v10
vpor %xmm10,%xmm6,%xmm6       # x4 = v01 | v11
vpand %xmm0,%xmm7,%xmm10      # v00 = x1 & mask0
vpand %xmm0,%xmm11,%xmm15     # v10 = x5 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm7,%xmm7       # v01 = x1 & mask1
vpand %xmm1,%xmm11,%xmm11     # v11 = x5 & mask1
psrlq $4,%xmm7                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm10,%xmm10     # x1 = v00 | v10
vpor %xmm11,%xmm7,%xmm7       # x5 = v01 | v11
vpand %xmm0,%xmm8,%xmm11      # v00 = x2 & mask0
vpand %xmm0,%xmm12,%xmm15     # v10 = x6 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm8,%xmm8       # v01 = x2 & mask1
vpand %xmm1,%xmm12,%xmm12     # v11 = x6 & mask1
psrlq $4,%xmm8                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm11,%xmm11     # x2 = v00 | v10
vpor %xmm12,%xmm8,%xmm8       # x6 = v01 | v11
vpand %xmm0,%xmm9,%xmm12      # v00 = x3 & mask0
vpand %xmm0,%xmm13,%xmm15     # v10 = x7 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm9,%xmm9       # v01 = x3 & mask1
vpand %xmm1,%xmm13,%xmm13     # v11 = x7 & mask1
psrlq $4,%xmm9                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm12,%xmm12     # x3 = v00 | v10
vpor %xmm13,%xmm9,%xmm9       # x7 = v01 | v11

vpand %xmm2,%xmm14,%xmm13     # v00 = x0 & mask2
vpand %xmm2,%xmm11,%xmm15     # v10 = x2 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm14,%xmm14     # v01 = x0 & mask3
vpand %xmm3,%xmm11,%xmm11     # v11 = x2 & mask3
psrlq $2,%xmm14               # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm13,%xmm13     # x0 = v00 | v10
vpor %xmm11,%xmm14,%xmm11     # x2 = v01 | v11
vpand %xmm2,%xmm10,%xmm14     # v00 = x1 & mask2
vpand %xmm2,%xmm12,%xmm15     # v10 = x3 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm10,%xmm10     # v01 = x1 & mask3
vpand %xmm3,%xmm12,%xmm12     # v11 = x3 & mask3
psrlq $2,%xmm10               # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm14,%xmm14     # x1 = v00 | v10
vpor %xmm12,%xmm10,%xmm10     # x3 = v01 | v11
vpand %xmm2,%xmm6,%xmm12      # v00 = x4 & mask2
vpand %xmm2,%xmm8,%xmm15      # v10 = x6 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm6,%xmm6       # v01 = x4 & mask3
vpand %xmm3,%xmm8,%xmm8       # v11 = x6 & mask3
psrlq $2,%xmm6                # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm12,%xmm12     # x4 = v00 | v10
vpor %xmm8,%xmm6,%xmm6        # x6 = v01 | v11
vpand %xmm2,%xmm7,%xmm8       # v00 = x5 & mask2
vpand %xmm2,%xmm9,%xmm15      # v10 = x7 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm7,%xmm7       # v01 = x5 & mask3
vpand %xmm3,%xmm9,%xmm9       # v11 = x7 & mask3
psrlq $2,%xmm7                # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm8,%xmm8       # x5 = v00 | v10
vpor %xmm9,%xmm7,%xmm7        # x7 = v01 | v11

vpand %xmm4,%xmm13,%xmm9      # v00 = x0 & mask4
vpand %xmm4,%xmm14,%xmm15     # v10 = x1 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm13,%xmm13     # v01 = x0 & mask5
vpand %xmm5,%xmm14,%xmm14     # v11 = x1 & mask5
psrlq $1,%xmm13               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm9,%xmm9       # x0 = v00 | v10
vpor %xmm14,%xmm13,%xmm13     # x1 = v01 | v11
vpand %xmm4,%xmm11,%xmm14     # v00 = x2 & mask4
vpand %xmm4,%xmm10,%xmm15     # v10 = x3 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm11,%xmm11     # v01 = x2 & mask5
vpand %xmm5,%xmm10,%xmm10     # v11 = x3 & mask5
psrlq $1,%xmm11               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm14,%xmm14     # x2 = v00 | v10
vpor %xmm10,%xmm11,%xmm10     # x3 = v01 | v11
vpand %xmm4,%xmm12,%xmm11     # v00 = x4 & mask4
vpand %xmm4,%xmm8,%xmm15      # v10 = x5 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm12,%xmm12     # v01 = x4 & mask5
vpand %xmm5,%xmm8,%xmm8       # v11 = x5 & mask5
psrlq $1,%xmm12               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm11,%xmm11     # x4 = v00 | v10
vpor %xmm8,%xmm12,%xmm8       # x5 = v01 | v11
vpand %xmm4,%xmm6,%xmm12      # v00 = x6 & mask4
vpand %xmm4,%xmm7,%xmm15      # v10 = x7 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm6,%xmm6       # v01 = x6 & mask5
vpand %xmm5,%xmm7,%xmm7       # v11 = x7 & mask5
psrlq $1,%xmm6                # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm12,%xmm12     # x6 = v00 | v10
vpor %xmm7,%xmm6,%xmm6        # x7 = v01 | v11

movdqu %xmm9,128(%rdi)        # mem128[ input_0 + 128 ] = x0
movdqu %xmm13,144(%rdi)       # mem128[ input_0 + 144 ] = x1
movdqu %xmm14,160(%rdi)       # mem128[ input_0 + 160 ] = x2
movdqu %xmm10,176(%rdi)       # mem128[ input_0 + 176 ] = x3
movdqu %xmm11,192(%rdi)       # mem128[ input_0 + 192 ] = x4
movdqu %xmm8,208(%rdi)        # mem128[ input_0 + 208 ] = x5
movdqu %xmm12,224(%rdi)       # mem128[ input_0 + 224 ] = x6
movdqu %xmm6,240(%rdi)        # mem128[ input_0 + 240 ] = x7
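# Editorial note: the sequence above repeats for each 128-byte block of
# the buffer; only the base offset of the movdqu loads and stores changes
# (0, 128, 256, 384, 512, ...).  Throughout, xmm0..xmm5 appear to hold the
# six mask constants mask0..mask5 unmodified, while xmm6..xmm15 serve as
# scratch for the eight rows x0..x7 and the temporaries v00/v01/v10/v11.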
movdqu 256(%rdi),%xmm6        # x0 = mem128[ input_0 + 256 ]
movdqu 272(%rdi),%xmm7        # x1 = mem128[ input_0 + 272 ]
movdqu 288(%rdi),%xmm8        # x2 = mem128[ input_0 + 288 ]
movdqu 304(%rdi),%xmm9        # x3 = mem128[ input_0 + 304 ]
movdqu 320(%rdi),%xmm10       # x4 = mem128[ input_0 + 320 ]
movdqu 336(%rdi),%xmm11       # x5 = mem128[ input_0 + 336 ]
movdqu 352(%rdi),%xmm12       # x6 = mem128[ input_0 + 352 ]
movdqu 368(%rdi),%xmm13       # x7 = mem128[ input_0 + 368 ]

vpand %xmm0,%xmm6,%xmm14      # v00 = x0 & mask0
vpand %xmm0,%xmm10,%xmm15     # v10 = x4 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm6,%xmm6       # v01 = x0 & mask1
vpand %xmm1,%xmm10,%xmm10     # v11 = x4 & mask1
psrlq $4,%xmm6                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm14,%xmm14     # x0 = v00 | v10
vpor %xmm10,%xmm6,%xmm6       # x4 = v01 | v11
vpand %xmm0,%xmm7,%xmm10      # v00 = x1 & mask0
vpand %xmm0,%xmm11,%xmm15     # v10 = x5 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm7,%xmm7       # v01 = x1 & mask1
vpand %xmm1,%xmm11,%xmm11     # v11 = x5 & mask1
psrlq $4,%xmm7                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm10,%xmm10     # x1 = v00 | v10
vpor %xmm11,%xmm7,%xmm7       # x5 = v01 | v11
vpand %xmm0,%xmm8,%xmm11      # v00 = x2 & mask0
vpand %xmm0,%xmm12,%xmm15     # v10 = x6 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm8,%xmm8       # v01 = x2 & mask1
vpand %xmm1,%xmm12,%xmm12     # v11 = x6 & mask1
psrlq $4,%xmm8                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm11,%xmm11     # x2 = v00 | v10
vpor %xmm12,%xmm8,%xmm8       # x6 = v01 | v11
vpand %xmm0,%xmm9,%xmm12      # v00 = x3 & mask0
vpand %xmm0,%xmm13,%xmm15     # v10 = x7 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm9,%xmm9       # v01 = x3 & mask1
vpand %xmm1,%xmm13,%xmm13     # v11 = x7 & mask1
psrlq $4,%xmm9                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm12,%xmm12     # x3 = v00 | v10
vpor %xmm13,%xmm9,%xmm9       # x7 = v01 | v11

vpand %xmm2,%xmm14,%xmm13     # v00 = x0 & mask2
vpand %xmm2,%xmm11,%xmm15     # v10 = x2 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm14,%xmm14     # v01 = x0 & mask3
vpand %xmm3,%xmm11,%xmm11     # v11 = x2 & mask3
psrlq $2,%xmm14               # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm13,%xmm13     # x0 = v00 | v10
vpor %xmm11,%xmm14,%xmm11     # x2 = v01 | v11
vpand %xmm2,%xmm10,%xmm14     # v00 = x1 & mask2
vpand %xmm2,%xmm12,%xmm15     # v10 = x3 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm10,%xmm10     # v01 = x1 & mask3
vpand %xmm3,%xmm12,%xmm12     # v11 = x3 & mask3
psrlq $2,%xmm10               # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm14,%xmm14     # x1 = v00 | v10
vpor %xmm12,%xmm10,%xmm10     # x3 = v01 | v11
vpand %xmm2,%xmm6,%xmm12      # v00 = x4 & mask2
vpand %xmm2,%xmm8,%xmm15      # v10 = x6 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm6,%xmm6       # v01 = x4 & mask3
vpand %xmm3,%xmm8,%xmm8       # v11 = x6 & mask3
psrlq $2,%xmm6                # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm12,%xmm12     # x4 = v00 | v10
vpor %xmm8,%xmm6,%xmm6        # x6 = v01 | v11
vpand %xmm2,%xmm7,%xmm8       # v00 = x5 & mask2
vpand %xmm2,%xmm9,%xmm15      # v10 = x7 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm7,%xmm7       # v01 = x5 & mask3
vpand %xmm3,%xmm9,%xmm9       # v11 = x7 & mask3
psrlq $2,%xmm7                # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm8,%xmm8       # x5 = v00 | v10
vpor %xmm9,%xmm7,%xmm7       # x7 = v01 | v11

vpand %xmm4,%xmm13,%xmm9      # v00 = x0 & mask4
vpand %xmm4,%xmm14,%xmm15     # v10 = x1 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm13,%xmm13     # v01 = x0 & mask5
vpand %xmm5,%xmm14,%xmm14     # v11 = x1 & mask5
psrlq $1,%xmm13               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm9,%xmm9       # x0 = v00 | v10
vpor %xmm14,%xmm13,%xmm13     # x1 = v01 | v11
vpand %xmm4,%xmm11,%xmm14     # v00 = x2 & mask4
vpand %xmm4,%xmm10,%xmm15     # v10 = x3 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm11,%xmm11     # v01 = x2 & mask5
vpand %xmm5,%xmm10,%xmm10     # v11 = x3 & mask5
psrlq $1,%xmm11               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm14,%xmm14     # x2 = v00 | v10
vpor %xmm10,%xmm11,%xmm10     # x3 = v01 | v11
vpand %xmm4,%xmm12,%xmm11     # v00 = x4 & mask4
vpand %xmm4,%xmm8,%xmm15      # v10 = x5 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm12,%xmm12     # v01 = x4 & mask5
vpand %xmm5,%xmm8,%xmm8       # v11 = x5 & mask5
psrlq $1,%xmm12               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm11,%xmm11     # x4 = v00 | v10
vpor %xmm8,%xmm12,%xmm8       # x5 = v01 | v11
vpand %xmm4,%xmm6,%xmm12      # v00 = x6 & mask4
vpand %xmm4,%xmm7,%xmm15      # v10 = x7 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm6,%xmm6       # v01 = x6 & mask5
vpand %xmm5,%xmm7,%xmm7       # v11 = x7 & mask5
psrlq $1,%xmm6                # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm12,%xmm12     # x6 = v00 | v10
vpor %xmm7,%xmm6,%xmm6        # x7 = v01 | v11

movdqu %xmm9,256(%rdi)        # mem128[ input_0 + 256 ] = x0
movdqu %xmm13,272(%rdi)       # mem128[ input_0 + 272 ] = x1
movdqu %xmm14,288(%rdi)       # mem128[ input_0 + 288 ] = x2
movdqu %xmm10,304(%rdi)       # mem128[ input_0 + 304 ] = x3
movdqu %xmm11,320(%rdi)       # mem128[ input_0 + 320 ] = x4
movdqu %xmm8,336(%rdi)        # mem128[ input_0 + 336 ] = x5
movdqu %xmm12,352(%rdi)       # mem128[ input_0 + 352 ] = x6
movdqu %xmm6,368(%rdi)        # mem128[ input_0 + 368 ] = x7
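# Editorial sketch: per 64-bit lane, one 128-byte block is transformed as
# in the C model below.  This is an assumption-laden illustration (helper
# names are hypothetical and the actual mask values are defined outside
# this excerpt), not the generated code itself:
#
#     #include <stdint.h>
#
#     /* one butterfly level over rows r[a] and r[b] */
#     static void level(uint64_t *r, int a, int b,
#                       uint64_t m_even, uint64_t m_odd, int s) {
#         uint64_t lo = r[a], hi = r[b];
#         r[a] = (lo & m_even) | ((hi & m_even) << s);
#         r[b] = ((lo & m_odd) >> s) | (hi & m_odd);
#     }
#
#     /* mirrors the three rounds above: row pairs at stride 4 with
#        shift 4, stride 2 with shift 2, stride 1 with shift 1 */
#     static void transpose_block(uint64_t r[8], const uint64_t m[6]) {
#         for (int i = 0; i < 4; i++)
#             level(r, i, i + 4, m[0], m[1], 4);
#         for (int i = 0; i < 8; i++)
#             if (!(i & 2)) level(r, i, i + 2, m[2], m[3], 2);
#         for (int i = 0; i < 8; i += 2)
#             level(r, i, i + 1, m[4], m[5], 1);
#     }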
movdqu 384(%rdi),%xmm6        # x0 = mem128[ input_0 + 384 ]
movdqu 400(%rdi),%xmm7        # x1 = mem128[ input_0 + 400 ]
movdqu 416(%rdi),%xmm8        # x2 = mem128[ input_0 + 416 ]
movdqu 432(%rdi),%xmm9        # x3 = mem128[ input_0 + 432 ]
movdqu 448(%rdi),%xmm10       # x4 = mem128[ input_0 + 448 ]
movdqu 464(%rdi),%xmm11       # x5 = mem128[ input_0 + 464 ]
movdqu 480(%rdi),%xmm12       # x6 = mem128[ input_0 + 480 ]
movdqu 496(%rdi),%xmm13       # x7 = mem128[ input_0 + 496 ]

vpand %xmm0,%xmm6,%xmm14      # v00 = x0 & mask0
vpand %xmm0,%xmm10,%xmm15     # v10 = x4 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm6,%xmm6       # v01 = x0 & mask1
vpand %xmm1,%xmm10,%xmm10     # v11 = x4 & mask1
psrlq $4,%xmm6                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm14,%xmm14     # x0 = v00 | v10
vpor %xmm10,%xmm6,%xmm6       # x4 = v01 | v11
vpand %xmm0,%xmm7,%xmm10      # v00 = x1 & mask0
vpand %xmm0,%xmm11,%xmm15     # v10 = x5 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm7,%xmm7       # v01 = x1 & mask1
vpand %xmm1,%xmm11,%xmm11     # v11 = x5 & mask1
psrlq $4,%xmm7                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm10,%xmm10     # x1 = v00 | v10
vpor %xmm11,%xmm7,%xmm7       # x5 = v01 | v11
vpand %xmm0,%xmm8,%xmm11      # v00 = x2 & mask0
vpand %xmm0,%xmm12,%xmm15     # v10 = x6 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm8,%xmm8       # v01 = x2 & mask1
vpand %xmm1,%xmm12,%xmm12     # v11 = x6 & mask1
psrlq $4,%xmm8                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm11,%xmm11     # x2 = v00 | v10
vpor %xmm12,%xmm8,%xmm8       # x6 = v01 | v11
vpand %xmm0,%xmm9,%xmm12      # v00 = x3 & mask0
vpand %xmm0,%xmm13,%xmm15     # v10 = x7 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm9,%xmm9       # v01 = x3 & mask1
vpand %xmm1,%xmm13,%xmm13     # v11 = x7 & mask1
psrlq $4,%xmm9                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm12,%xmm12     # x3 = v00 | v10
vpor %xmm13,%xmm9,%xmm9       # x7 = v01 | v11

vpand %xmm2,%xmm14,%xmm13     # v00 = x0 & mask2
vpand %xmm2,%xmm11,%xmm15     # v10 = x2 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm14,%xmm14     # v01 = x0 & mask3
vpand %xmm3,%xmm11,%xmm11     # v11 = x2 & mask3
psrlq $2,%xmm14               # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm13,%xmm13     # x0 = v00 | v10
vpor %xmm11,%xmm14,%xmm11     # x2 = v01 | v11
vpand %xmm2,%xmm10,%xmm14     # v00 = x1 & mask2
vpand %xmm2,%xmm12,%xmm15     # v10 = x3 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm10,%xmm10     # v01 = x1 & mask3
vpand %xmm3,%xmm12,%xmm12     # v11 = x3 & mask3
psrlq $2,%xmm10               # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm14,%xmm14     # x1 = v00 | v10
vpor %xmm12,%xmm10,%xmm10     # x3 = v01 | v11
vpand %xmm2,%xmm6,%xmm12      # v00 = x4 & mask2
vpand %xmm2,%xmm8,%xmm15      # v10 = x6 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm6,%xmm6       # v01 = x4 & mask3
vpand %xmm3,%xmm8,%xmm8       # v11 = x6 & mask3
psrlq $2,%xmm6                # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm12,%xmm12     # x4 = v00 | v10
vpor %xmm8,%xmm6,%xmm6        # x6 = v01 | v11
vpand %xmm2,%xmm7,%xmm8       # v00 = x5 & mask2
vpand %xmm2,%xmm9,%xmm15      # v10 = x7 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm7,%xmm7       # v01 = x5 & mask3
vpand %xmm3,%xmm9,%xmm9       # v11 = x7 & mask3
psrlq $2,%xmm7                # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm8,%xmm8       # x5 = v00 | v10
vpor %xmm9,%xmm7,%xmm7        # x7 = v01 | v11

vpand %xmm4,%xmm13,%xmm9      # v00 = x0 & mask4
vpand %xmm4,%xmm14,%xmm15     # v10 = x1 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm13,%xmm13     # v01 = x0 & mask5
vpand %xmm5,%xmm14,%xmm14     # v11 = x1 & mask5
psrlq $1,%xmm13               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm9,%xmm9       # x0 = v00 | v10
vpor %xmm14,%xmm13,%xmm13     # x1 = v01 | v11
vpand %xmm4,%xmm11,%xmm14     # v00 = x2 & mask4
vpand %xmm4,%xmm10,%xmm15     # v10 = x3 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm11,%xmm11     # v01 = x2 & mask5
vpand %xmm5,%xmm10,%xmm10     # v11 = x3 & mask5
psrlq $1,%xmm11               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm14,%xmm14     # x2 = v00 | v10
vpor %xmm10,%xmm11,%xmm10     # x3 = v01 | v11
vpand %xmm4,%xmm12,%xmm11     # v00 = x4 & mask4
vpand %xmm4,%xmm8,%xmm15      # v10 = x5 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm12,%xmm12     # v01 = x4 & mask5
vpand %xmm5,%xmm8,%xmm8       # v11 = x5 & mask5
psrlq $1,%xmm12               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm11,%xmm11     # x4 = v00 | v10
vpor %xmm8,%xmm12,%xmm8       # x5 = v01 | v11
vpand %xmm4,%xmm6,%xmm12      # v00 = x6 & mask4
vpand %xmm4,%xmm7,%xmm15      # v10 = x7 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm6,%xmm6       # v01 = x6 & mask5
vpand %xmm5,%xmm7,%xmm7       # v11 = x7 & mask5
psrlq $1,%xmm6                # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm12,%xmm12     # x6 = v00 | v10
vpor %xmm7,%xmm6,%xmm6        # x7 = v01 | v11

movdqu %xmm9,384(%rdi)        # mem128[ input_0 + 384 ] = x0
movdqu %xmm13,400(%rdi)       # mem128[ input_0 + 400 ] = x1
movdqu %xmm14,416(%rdi)       # mem128[ input_0 + 416 ] = x2
movdqu %xmm10,432(%rdi)       # mem128[ input_0 + 432 ] = x3
movdqu %xmm11,448(%rdi)       # mem128[ input_0 + 448 ] = x4
movdqu %xmm8,464(%rdi)        # mem128[ input_0 + 464 ] = x5
movdqu %xmm12,480(%rdi)       # mem128[ input_0 + 480 ] = x6
movdqu %xmm6,496(%rdi)        # mem128[ input_0 + 496 ] = x7
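# Editorial note: all memory traffic here uses movdqu (unaligned 128-bit
# load/store), so the buffer passed in %rdi (input_0) does not need to be
# 16-byte aligned.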
movdqu 512(%rdi),%xmm6        # x0 = mem128[ input_0 + 512 ]
movdqu 528(%rdi),%xmm7        # x1 = mem128[ input_0 + 528 ]
movdqu 544(%rdi),%xmm8        # x2 = mem128[ input_0 + 544 ]
movdqu 560(%rdi),%xmm9        # x3 = mem128[ input_0 + 560 ]
movdqu 576(%rdi),%xmm10       # x4 = mem128[ input_0 + 576 ]
movdqu 592(%rdi),%xmm11       # x5 = mem128[ input_0 + 592 ]
movdqu 608(%rdi),%xmm12       # x6 = mem128[ input_0 + 608 ]
movdqu 624(%rdi),%xmm13       # x7 = mem128[ input_0 + 624 ]

vpand %xmm0,%xmm6,%xmm14      # v00 = x0 & mask0
vpand %xmm0,%xmm10,%xmm15     # v10 = x4 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm6,%xmm6       # v01 = x0 & mask1
vpand %xmm1,%xmm10,%xmm10     # v11 = x4 & mask1
psrlq $4,%xmm6                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm14,%xmm14     # x0 = v00 | v10
vpor %xmm10,%xmm6,%xmm6       # x4 = v01 | v11
vpand %xmm0,%xmm7,%xmm10      # v00 = x1 & mask0
vpand %xmm0,%xmm11,%xmm15     # v10 = x5 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm7,%xmm7       # v01 = x1 & mask1
vpand %xmm1,%xmm11,%xmm11     # v11 = x5 & mask1
psrlq $4,%xmm7                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm10,%xmm10     # x1 = v00 | v10
vpor %xmm11,%xmm7,%xmm7       # x5 = v01 | v11
vpand %xmm0,%xmm8,%xmm11      # v00 = x2 & mask0
vpand %xmm0,%xmm12,%xmm15     # v10 = x6 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm8,%xmm8       # v01 = x2 & mask1
vpand %xmm1,%xmm12,%xmm12     # v11 = x6 & mask1
psrlq $4,%xmm8                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm11,%xmm11     # x2 = v00 | v10
vpor %xmm12,%xmm8,%xmm8       # x6 = v01 | v11
vpand %xmm0,%xmm9,%xmm12      # v00 = x3 & mask0
vpand %xmm0,%xmm13,%xmm15     # v10 = x7 & mask0
psllq $4,%xmm15               # 2x v10 <<= 4
vpand %xmm1,%xmm9,%xmm9       # v01 = x3 & mask1
vpand %xmm1,%xmm13,%xmm13     # v11 = x7 & mask1
psrlq $4,%xmm9                # 2x v01 unsigned>>= 4
vpor %xmm15,%xmm12,%xmm12     # x3 = v00 | v10
vpor %xmm13,%xmm9,%xmm9       # x7 = v01 | v11

vpand %xmm2,%xmm14,%xmm13     # v00 = x0 & mask2
vpand %xmm2,%xmm11,%xmm15     # v10 = x2 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm14,%xmm14     # v01 = x0 & mask3
vpand %xmm3,%xmm11,%xmm11     # v11 = x2 & mask3
psrlq $2,%xmm14               # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm13,%xmm13     # x0 = v00 | v10
vpor %xmm11,%xmm14,%xmm11     # x2 = v01 | v11
vpand %xmm2,%xmm10,%xmm14     # v00 = x1 & mask2
vpand %xmm2,%xmm12,%xmm15     # v10 = x3 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm10,%xmm10     # v01 = x1 & mask3
vpand %xmm3,%xmm12,%xmm12     # v11 = x3 & mask3
psrlq $2,%xmm10               # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm14,%xmm14     # x1 = v00 | v10
vpor %xmm12,%xmm10,%xmm10     # x3 = v01 | v11
vpand %xmm2,%xmm6,%xmm12      # v00 = x4 & mask2
vpand %xmm2,%xmm8,%xmm15      # v10 = x6 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm6,%xmm6       # v01 = x4 & mask3
vpand %xmm3,%xmm8,%xmm8       # v11 = x6 & mask3
psrlq $2,%xmm6                # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm12,%xmm12     # x4 = v00 | v10
vpor %xmm8,%xmm6,%xmm6        # x6 = v01 | v11
vpand %xmm2,%xmm7,%xmm8       # v00 = x5 & mask2
vpand %xmm2,%xmm9,%xmm15      # v10 = x7 & mask2
psllq $2,%xmm15               # 2x v10 <<= 2
vpand %xmm3,%xmm7,%xmm7       # v01 = x5 & mask3
vpand %xmm3,%xmm9,%xmm9       # v11 = x7 & mask3
psrlq $2,%xmm7                # 2x v01 unsigned>>= 2
vpor %xmm15,%xmm8,%xmm8       # x5 = v00 | v10
vpor %xmm9,%xmm7,%xmm7        # x7 = v01 | v11

vpand %xmm4,%xmm13,%xmm9      # v00 = x0 & mask4
vpand %xmm4,%xmm14,%xmm15     # v10 = x1 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm13,%xmm13     # v01 = x0 & mask5
vpand %xmm5,%xmm14,%xmm14     # v11 = x1 & mask5
psrlq $1,%xmm13               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm9,%xmm9       # x0 = v00 | v10
vpor %xmm14,%xmm13,%xmm13     # x1 = v01 | v11
vpand %xmm4,%xmm11,%xmm14     # v00 = x2 & mask4
vpand %xmm4,%xmm10,%xmm15     # v10 = x3 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm11,%xmm11     # v01 = x2 & mask5
vpand %xmm5,%xmm10,%xmm10     # v11 = x3 & mask5
psrlq $1,%xmm11               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm14,%xmm14     # x2 = v00 | v10
vpor %xmm10,%xmm11,%xmm10     # x3 = v01 | v11
vpand %xmm4,%xmm12,%xmm11     # v00 = x4 & mask4
vpand %xmm4,%xmm8,%xmm15      # v10 = x5 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm12,%xmm12     # v01 = x4 & mask5
vpand %xmm5,%xmm8,%xmm8       # v11 = x5 & mask5
psrlq $1,%xmm12               # 2x v01 unsigned>>= 1
vpor %xmm15,%xmm11,%xmm11     # x4 = v00 | v10
vpor %xmm8,%xmm12,%xmm8       # x5 = v01 | v11
vpand %xmm4,%xmm6,%xmm12      # v00 = x6 & mask4
vpand %xmm4,%xmm7,%xmm15      # v10 = x7 & mask4
psllq $1,%xmm15               # 2x v10 <<= 1
vpand %xmm5,%xmm6,%xmm6       # v01 = x6 & mask5
xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 512 ] = x0 # asm 1: movdqu <x0=reg128#10,512(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,512(<input_0=%rdi) movdqu % xmm9, 512( % rdi) # qhasm: mem128[ input_0 + 528 ] = x1 # asm 1: movdqu <x1=reg128#14,528(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,528(<input_0=%rdi) movdqu % xmm13, 528( % rdi) # qhasm: mem128[ input_0 + 544 ] = x2 # asm 1: movdqu <x2=reg128#15,544(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,544(<input_0=%rdi) movdqu % xmm14, 544( % rdi) # qhasm: mem128[ input_0 + 560 ] = x3 # asm 1: movdqu <x3=reg128#11,560(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,560(<input_0=%rdi) movdqu % xmm10, 560( % rdi) # qhasm: mem128[ input_0 + 576 ] = x4 # asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi) movdqu % xmm11, 576( % rdi) # qhasm: mem128[ input_0 + 592 ] = x5 # asm 1: movdqu <x5=reg128#9,592(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,592(<input_0=%rdi) movdqu % xmm8, 592( % rdi) # qhasm: mem128[ input_0 + 608 ] = x6 # asm 1: movdqu <x6=reg128#13,608(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,608(<input_0=%rdi) movdqu % xmm12, 608( % rdi) # qhasm: mem128[ input_0 + 624 ] = x7 # asm 1: movdqu <x7=reg128#7,624(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,624(<input_0=%rdi) movdqu % xmm6, 624( % rdi) # qhasm: x0 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 640(<input_0=%rdi),>x0=%xmm6 movdqu 640( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 656 ] # asm 1: movdqu 656(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 656(<input_0=%rdi),>x1=%xmm7 movdqu 656( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 672 ] # asm 1: movdqu 672(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 672(<input_0=%rdi),>x2=%xmm8 movdqu 672( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 688 ] # asm 1: movdqu 688(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 688(<input_0=%rdi),>x3=%xmm9 movdqu 688( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 704 ] # asm 1: movdqu 704(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 704(<input_0=%rdi),>x4=%xmm10 movdqu 704( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 720 ] # asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11 movdqu 720( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 736 ] # asm 1: movdqu 736(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 736(<input_0=%rdi),>x6=%xmm12 movdqu 736( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 752 ] # asm 1: movdqu 752(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 752(<input_0=%rdi),>x7=%xmm13 movdqu 752( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 
<<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % 
xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand 
<mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 
2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 640 ] = x0 # asm 1: movdqu <x0=reg128#10,640(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,640(<input_0=%rdi) movdqu % xmm9, 640( % rdi) # qhasm: mem128[ input_0 + 656 ] = x1 # asm 1: movdqu 
<x1=reg128#14,656(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,656(<input_0=%rdi) movdqu % xmm13, 656( % rdi) # qhasm: mem128[ input_0 + 672 ] = x2 # asm 1: movdqu <x2=reg128#15,672(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,672(<input_0=%rdi) movdqu % xmm14, 672( % rdi) # qhasm: mem128[ input_0 + 688 ] = x3 # asm 1: movdqu <x3=reg128#11,688(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,688(<input_0=%rdi) movdqu % xmm10, 688( % rdi) # qhasm: mem128[ input_0 + 704 ] = x4 # asm 1: movdqu <x4=reg128#12,704(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,704(<input_0=%rdi) movdqu % xmm11, 704( % rdi) # qhasm: mem128[ input_0 + 720 ] = x5 # asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi) movdqu % xmm8, 720( % rdi) # qhasm: mem128[ input_0 + 736 ] = x6 # asm 1: movdqu <x6=reg128#13,736(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,736(<input_0=%rdi) movdqu % xmm12, 736( % rdi) # qhasm: mem128[ input_0 + 752 ] = x7 # asm 1: movdqu <x7=reg128#7,752(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,752(<input_0=%rdi) movdqu % xmm6, 752( % rdi) # qhasm: x0 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 768(<input_0=%rdi),>x0=%xmm6 movdqu 768( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 784 ] # asm 1: movdqu 784(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 784(<input_0=%rdi),>x1=%xmm7 movdqu 784( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 800 ] # asm 1: movdqu 800(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 800(<input_0=%rdi),>x2=%xmm8 movdqu 800( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 816 ] # asm 1: movdqu 816(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 816(<input_0=%rdi),>x3=%xmm9 movdqu 816( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 832 ] # asm 1: movdqu 832(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 832(<input_0=%rdi),>x4=%xmm10 movdqu 832( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 848 ] # asm 1: movdqu 848(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 848(<input_0=%rdi),>x5=%xmm11 movdqu 848( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 864 ] # asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12 movdqu 864( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 880 ] # asm 1: movdqu 880(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 880(<input_0=%rdi),>x7=%xmm13 movdqu 880( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 
vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor 
<v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 
= v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 768 ] = x0 # asm 1: movdqu <x0=reg128#10,768(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,768(<input_0=%rdi) movdqu % xmm9, 768( % rdi) # qhasm: mem128[ input_0 + 784 ] = x1 # asm 1: movdqu <x1=reg128#14,784(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,784(<input_0=%rdi) movdqu % xmm13, 784( % rdi) # qhasm: mem128[ input_0 + 800 ] = x2 # asm 1: movdqu <x2=reg128#15,800(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,800(<input_0=%rdi) movdqu % xmm14, 800( % rdi) # qhasm: mem128[ input_0 + 816 ] = x3 # asm 1: movdqu <x3=reg128#11,816(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,816(<input_0=%rdi) movdqu % xmm10, 816( % rdi) # qhasm: mem128[ input_0 + 832 ] = x4 # asm 1: movdqu <x4=reg128#12,832(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,832(<input_0=%rdi) movdqu % xmm11, 832( % rdi) # qhasm: mem128[ input_0 + 848 ] = x5 # asm 1: movdqu <x5=reg128#9,848(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,848(<input_0=%rdi) movdqu % xmm8, 848( % rdi) # qhasm: mem128[ input_0 + 864 ] = x6 # asm 1: 
movdqu <x6=reg128#13,864(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi) movdqu % xmm12, 864( % rdi) # qhasm: mem128[ input_0 + 880 ] = x7 # asm 1: movdqu <x7=reg128#7,880(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,880(<input_0=%rdi) movdqu % xmm6, 880( % rdi) # qhasm: x0 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 896(<input_0=%rdi),>x0=%xmm6 movdqu 896( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 912 ] # asm 1: movdqu 912(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 912(<input_0=%rdi),>x1=%xmm7 movdqu 912( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 928 ] # asm 1: movdqu 928(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 928(<input_0=%rdi),>x2=%xmm8 movdqu 928( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 944 ] # asm 1: movdqu 944(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 944(<input_0=%rdi),>x3=%xmm9 movdqu 944( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 960 ] # asm 1: movdqu 960(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 960(<input_0=%rdi),>x4=%xmm10 movdqu 960( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 976 ] # asm 1: movdqu 976(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 976(<input_0=%rdi),>x5=%xmm11 movdqu 976( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 992 ] # asm 1: movdqu 992(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 992(<input_0=%rdi),>x6=%xmm12 movdqu 992( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 1008 ] # asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13 movdqu 1008( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x 
v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>x3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>x3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % 
xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<x3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<x7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % 
xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>x5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>x5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<x0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<x1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<x0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<x3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<x5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<x4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<x7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>x6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>x6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>x7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: mem128[ input_0 + 896 ] = x0 # asm 1: movdqu <x0=reg128#4,896(<input_0=int64#1) # asm 2: movdqu <x0=%xmm3,896(<input_0=%rdi) movdqu % xmm3, 896( % rdi) # qhasm: mem128[ input_0 + 912 ] = x1 # asm 1: movdqu <x1=reg128#8,912(<input_0=int64#1) # asm 2: movdqu <x1=%xmm7,912(<input_0=%rdi) movdqu % xmm7, 912( % rdi) # qhasm: mem128[ input_0 + 928 ] = x2 # asm 1: movdqu <x2=reg128#9,928(<input_0=int64#1) # asm 2: movdqu <x2=%xmm8,928(<input_0=%rdi) movdqu % xmm8, 928( % rdi) # qhasm: mem128[ input_0 + 944 ] = x3 # asm 1: movdqu <x3=reg128#1,944(<input_0=int64#1) # asm 2: movdqu <x3=%xmm0,944(<input_0=%rdi) movdqu % xmm0, 944( % rdi) # qhasm: mem128[ input_0 + 960 ] = x4 # asm 1: movdqu <x4=reg128#10,960(<input_0=int64#1) # asm 2: movdqu <x4=%xmm9,960(<input_0=%rdi) movdqu % xmm9, 960( % rdi) # qhasm: mem128[ input_0 + 976 ] = x5 # asm 1: movdqu <x5=reg128#3,976(<input_0=int64#1) # asm 2: movdqu <x5=%xmm2,976(<input_0=%rdi) movdqu % xmm2, 976( % rdi) # qhasm: mem128[ input_0 + 992 ] = x6 # asm 1: movdqu <x6=reg128#5,992(<input_0=int64#1) # asm 2: movdqu <x6=%xmm4,992(<input_0=%rdi) movdqu % xmm4, 992( % rdi) # qhasm: mem128[ input_0 + 1008 ] = x7 # asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1) # asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi) movdqu % xmm1, 1008( % rdi) # qhasm: return add % r11, % rsp ret
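The `ret` above closes a qhasm-generated routine that applies the same masked-shift interleaving step over and over: pairs of `vpand` select complementary bit groups, `psllq`/`psrlq` move one group by 4, 2, then 1 bit, and `vpor` recombines, which is the classic in-register bit-matrix transpose. A minimal C sketch of one such step on a single 64-bit lane (the assembly works on 128-bit xmm lanes); this is a model under stated assumptions, not the routine itself, and the name `interleave_step` is illustrative. It assumes paired masks with `maskB == maskA << s`, the usual choice being `0x5555…`/`0xAAAA…` for stride 1, `0x3333…`/`0xCCCC…` for stride 2, and `0x0F0F…`/`0xF0F0…` for stride 4, which is consistent with the shift amounts seen above:

```c
#include <stdint.h>

/* One masked-shift interleaving step, mirroring the vpand/psllq/psrlq/vpor
 * groups in the assembly, modelled on a single 64-bit lane.  maskA selects
 * the bit groups that stay in place, maskB (= maskA << s) the groups that
 * cross between the two words.  (Illustrative sketch, not the asm itself.) */
static void interleave_step(uint64_t *x0, uint64_t *x1,
                            uint64_t maskA, uint64_t maskB, unsigned s)
{
    uint64_t v00 = *x0 & maskA;          /* v00 = x0 & maskA: bits that stay in x0  */
    uint64_t v10 = (*x1 & maskA) << s;   /* v10 = (x1 & maskA) << s: move into x0   */
    uint64_t v01 = (*x0 & maskB) >> s;   /* v01 = (x0 & maskB) >> s: move into x1   */
    uint64_t v11 = *x1 & maskB;          /* v11 = x1 & maskB: bits that stay in x1  */
    *x0 = v00 | v10;                     /* x0 = v00 | v10 */
    *x1 = v01 | v11;                     /* x1 = v01 | v11 */
}
```

Chaining this step across all register pairs at strides 4, 2, and 1, exactly as the unrolled assembly does for each 128-byte chunk, yields the transpose; unrolling removes the loop overhead and keeps all eight rows resident in xmm registers between the load and store groups.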
mktmansour/MKT-KSA-Geolocation-Security
69,549
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128/avx2/vec256_mul_asm.S
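The record below is a bitsliced carry-less multiplication of two 13-coefficient operands (`a0`..`a12` against 13 words of `b`): each `vpand` forms a partial product a_i·b_j, the `vpxor`s accumulate it into r_{i+j}, and the interleaved folds (e.g. `r15 ^= r24`, `r14 ^= r24`, `r12 ^= r24`, `r11 = r24`) reduce degrees 13..24 as they appear. A minimal C sketch of the same schoolbook multiply-and-reduce, assuming the GF(2^13) field polynomial x^13 + x^4 + x^3 + x + 1 (the fold offsets in the assembly are consistent with it, but the polynomial is an inference, not stated in this file). The assembly operates on 256-bit ymm lanes and interleaves reduction with accumulation; a 64-bit word and a separate reduction pass are used here for clarity, and all names are illustrative:

```c
#include <stdint.h>

#define GFBITS 13  /* matches the a0..a12 / r0..r24 register layout above */

/* Bitsliced GF(2^13) multiplication: word i carries coefficient-i of 64
 * independent field elements.  Schoolbook AND/XOR partial products, then
 * reduction using x^13 = x^4 + x^3 + x + 1 (assumed field polynomial). */
static void vec_mul_sketch(uint64_t h[GFBITS],
                           const uint64_t f[GFBITS],
                           const uint64_t g[GFBITS])
{
    uint64_t buf[2 * GFBITS - 1] = {0};

    /* schoolbook: buf[i+j] ^= f[i] & g[j], one vpand/vpxor pair each */
    for (int i = 0; i < GFBITS; i++)
        for (int j = 0; j < GFBITS; j++)
            buf[i + j] ^= f[i] & g[j];

    /* fold degrees 24..13 down; offsets 4, 3, 1, 0 match the
     * r15/r14/r12/r11 ^= r24 pattern seen in the assembly */
    for (int i = 2 * GFBITS - 2; i >= GFBITS; i--) {
        buf[i - GFBITS + 4] ^= buf[i];
        buf[i - GFBITS + 3] ^= buf[i];
        buf[i - GFBITS + 1] ^= buf[i];
        buf[i - GFBITS + 0] ^= buf[i];
    }

    for (int i = 0; i < GFBITS; i++)
        h[i] = buf[i];
}
```

The assembly's `r11 = r24` (a plain move rather than an XOR) is the same computation: at that point no partial product has been accumulated into r11 yet, so the first contribution is a copy; later `a11 & b0`-style terms are XORed in on top.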
#include "namespace.h" #define vec256_mul_asm CRYPTO_NAMESPACE(vec256_mul_asm) #define _vec256_mul_asm _CRYPTO_NAMESPACE(vec256_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_mul_asm .p2align 5 .global _vec256_mul_asm .global vec256_mul_asm _vec256_mul_asm: vec256_mul_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#3,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm2,384(<input_0=%rdi) vmovupd % ymm2, 384( % rdi) # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#2,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm1,352(<input_0=%rdi) vmovupd % ymm1, 352( % rdi) # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: 
vmovupd <r9=reg256#13,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm12,288(<input_0=%rdi) vmovupd % ymm12, 288( % rdi) # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#12,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm11,256(<input_0=%rdi) vmovupd % ymm11, 256( % rdi) # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#11,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm10,224(<input_0=%rdi) vmovupd % ymm10, 224( % rdi) # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#10,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm9,192(<input_0=%rdi) vmovupd % ymm9, 192( % rdi) # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#9,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm8,160(<input_0=%rdi) vmovupd % ymm8, 160( % rdi) # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#8,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm7,128(<input_0=%rdi) vmovupd % ymm7, 128( % rdi) # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#7,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm6,96(<input_0=%rdi) vmovupd % ymm6, 96( % rdi) # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#6,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm5,64(<input_0=%rdi) vmovupd % ymm5, 64( % rdi) # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#5,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm4,32(<input_0=%rdi) vmovupd % ymm4, 32( % rdi) # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#4,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm3,0(<input_0=%rdi) vmovupd % ymm3, 0( % rdi) # qhasm: return add % r11, % rsp ret
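The vpand/vpxor ladder that ends above is a bitsliced field multiplication: word i of each operand holds bit i of many independent GF(2^13) elements, partial products are vpand, and accumulation plus reduction are vpxor. Below is a minimal scalar C sketch of the same pattern. The name vec_mul_sketch is hypothetical; GFBITS = 13 is inferred from the 13 input words at offsets 0, 32, ..., 384, and the reduction z^13 = z^4 + z^3 + z + 1 is inferred from the visible folds of r13 into r4, r3, r1 and r0 (the assembly interleaves those folds with the accumulation instead of running them as a final pass, and works on 256-bit lanes rather than 64-bit words).

#include <stdint.h>

#define GFBITS 13  /* inferred: 13 bitsliced coefficient words in the asm */

/* Scalar sketch of a bitsliced GF(2^GFBITS) multiply: h, f, g are
   GFBITS words, and bit j of word i is coefficient i of element j. */
static void vec_mul_sketch(uint64_t h[GFBITS],
                           const uint64_t f[GFBITS],
                           const uint64_t g[GFBITS])
{
    uint64_t buf[2 * GFBITS - 1] = {0};
    int i, j;

    /* schoolbook product over GF(2): AND for bit-multiply, XOR to add,
       mirroring the vpand/vpxor pairs in the assembly above */
    for (i = 0; i < GFBITS; i++)
        for (j = 0; j < GFBITS; j++)
            buf[i + j] ^= f[i] & g[j];

    /* fold coefficients GFBITS..2*GFBITS-2 back below GFBITS, assuming
       z^13 = z^4 + z^3 + z + 1 as suggested by the r13 folds above */
    for (i = 2 * GFBITS - 2; i >= GFBITS; i--) {
        buf[i - GFBITS + 4] ^= buf[i];
        buf[i - GFBITS + 3] ^= buf[i];
        buf[i - GFBITS + 1] ^= buf[i];
        buf[i - GFBITS]     ^= buf[i];
    }

    for (i = 0; i < GFBITS; i++)
        h[i] = buf[i];
}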
mktmansour/MKT-KSA-Geolocation-Security
264,233
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128/avx2/transpose_64x256_sp_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x256_sp_asm CRYPTO_NAMESPACE(transpose_64x256_sp_asm) #define _transpose_64x256_sp_asm _CRYPTO_NAMESPACE(transpose_64x256_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 x0 # qhasm: reg256 x1 # qhasm: reg256 x2 # qhasm: reg256 x3 # qhasm: reg256 x4 # qhasm: reg256 x5 # qhasm: reg256 x6 # qhasm: reg256 x7 # qhasm: reg256 t0 # qhasm: reg256 t1 # qhasm: reg256 v00 # qhasm: reg256 v01 # qhasm: reg256 v10 # qhasm: reg256 v11 # qhasm: reg256 mask0 # qhasm: reg256 mask1 # qhasm: reg256 mask2 # qhasm: reg256 mask3 # qhasm: reg256 mask4 # qhasm: reg256 mask5 # qhasm: enter transpose_64x256_sp_asm .p2align 5 .global _transpose_64x256_sp_asm .global transpose_64x256_sp_asm _transpose_64x256_sp_asm: transpose_64x256_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem256[ MASK5_0 ] # asm 1: vmovapd MASK5_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK5_0(%rip),>mask0=%ymm0 vmovapd MASK5_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK5_1 ] # asm 1: vmovapd MASK5_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK5_1(%rip),>mask1=%ymm1 vmovapd MASK5_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK4_0 ] # asm 1: vmovapd MASK4_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK4_0(%rip),>mask2=%ymm2 vmovapd MASK4_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK4_1 ] # asm 1: vmovapd MASK4_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK4_1(%rip),>mask3=%ymm3 vmovapd MASK4_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK3_0 ] # asm 1: vmovapd MASK3_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK3_0(%rip),>mask4=%ymm4 vmovapd MASK3_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK3_1 ] # asm 1: vmovapd MASK3_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK3_1(%rip),>mask5=%ymm5 vmovapd MASK3_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 256(<input_0=%rdi),>x1=%ymm7 vmovupd 256( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 512 ] 
# asm 1: vmovupd 512(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 512(<input_0=%rdi),>x2=%ymm8 vmovupd 512( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 768 ] # asm 1: vmovupd 768(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 768(<input_0=%rdi),>x3=%ymm9 vmovupd 768( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1024(<input_0=%rdi),>x4=%ymm10 vmovupd 1024( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1280 ] # asm 1: vmovupd 1280(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1280(<input_0=%rdi),>x5=%ymm11 vmovupd 1280( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1536(<input_0=%rdi),>x6=%ymm12 vmovupd 1536( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1792(<input_0=%rdi),>x7=%ymm13 vmovupd 1792( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 
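# A hedged scalar sketch of the masked-swap rounds above: for a word
# pair (x, y) at stride s (32, 16, then 8 here), with mask0 selecting
# the low s bits of each 2s-bit lane and mask1 = ~mask0, the generated
# v00/v10/v01/v11 pattern computes
#     x' = (x & mask0) | (y << s)
#     y' = (x >> s) | (y & mask1)
# i.e. one butterfly of the standard bit-matrix transpose, exchanging
# the high s-bit lanes of x with the low s-bit lanes of y.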
# qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 256 ] = x1 # asm 1: vmovupd <x1=reg256#14,256(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,256(<input_0=%rdi) vmovupd % ymm13, 256( % rdi) # qhasm: mem256[ input_0 + 512 ] = x2 # asm 1: vmovupd <x2=reg256#15,512(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,512(<input_0=%rdi) vmovupd % ymm14, 512( % rdi) # qhasm: mem256[ input_0 + 768 ] = x3 # asm 1: vmovupd <x3=reg256#11,768(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,768(<input_0=%rdi) vmovupd % ymm10, 768( % rdi) # qhasm: mem256[ input_0 + 1024 ] = x4 # asm 1: vmovupd <x4=reg256#12,1024(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1024(<input_0=%rdi) vmovupd % ymm11, 1024( % rdi) # qhasm: mem256[ input_0 + 1280 ] = x5 # asm 1: vmovupd <x5=reg256#9,1280(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1280(<input_0=%rdi) vmovupd % ymm8, 1280( % rdi) # qhasm: mem256[ input_0 + 1536 ] = x6 # asm 1: vmovupd <x6=reg256#13,1536(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1536(<input_0=%rdi) vmovupd % ymm12, 1536( % rdi) # qhasm: mem256[ input_0 + 1792 ] = x7 # asm 1: vmovupd <x7=reg256#7,1792(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1792(<input_0=%rdi) vmovupd % ymm6, 1792( % rdi) # qhasm: x0 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6 vmovupd 32( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 544 ] # asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8 vmovupd 544( % rdi), % ymm8 # qhasm: x3 = mem256[ 
input_0 + 800 ] # asm 1: vmovupd 800(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 800(<input_0=%rdi),>x3=%ymm9 vmovupd 800( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1056 ] # asm 1: vmovupd 1056(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1056(<input_0=%rdi),>x4=%ymm10 vmovupd 1056( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1312 ] # asm 1: vmovupd 1312(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1312(<input_0=%rdi),>x5=%ymm11 vmovupd 1312( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1568(<input_0=%rdi),>x6=%ymm12 vmovupd 1568( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1824(<input_0=%rdi),>x7=%ymm13 vmovupd 1824( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: 
vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 
2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x 
v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 32 ] = x0 # asm 1: vmovupd <x0=reg256#10,32(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,32(<input_0=%rdi) vmovupd % ymm9, 32( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 544 ] = x2 # asm 1: vmovupd <x2=reg256#15,544(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,544(<input_0=%rdi) vmovupd % ymm14, 544( % rdi) # qhasm: mem256[ input_0 + 800 ] = x3 # asm 1: vmovupd <x3=reg256#11,800(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,800(<input_0=%rdi) vmovupd % ymm10, 800( % rdi) # qhasm: mem256[ input_0 + 1056 ] = x4 # asm 1: vmovupd <x4=reg256#12,1056(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1056(<input_0=%rdi) vmovupd % ymm11, 1056( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x5 # asm 1: vmovupd <x5=reg256#9,1312(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1312(<input_0=%rdi) vmovupd % ymm8, 1312( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x6 # asm 1: vmovupd <x6=reg256#13,1568(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1568(<input_0=%rdi) vmovupd % ymm12, 1568( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x7 # asm 1: vmovupd <x7=reg256#7,1824(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1824(<input_0=%rdi) vmovupd % ymm6, 1824( % rdi) # qhasm: x0 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 64(<input_0=%rdi),>x0=%ymm6 vmovupd 64( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 320(<input_0=%rdi),>x1=%ymm7 vmovupd 320( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 832 ] # asm 1: vmovupd 832(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 832(<input_0=%rdi),>x3=%ymm9 vmovupd 832( % rdi), % ymm9 # qhasm: 
x4 = mem256[ input_0 + 1088 ] # asm 1: vmovupd 1088(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1088(<input_0=%rdi),>x4=%ymm10 vmovupd 1088( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1344 ] # asm 1: vmovupd 1344(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1344(<input_0=%rdi),>x5=%ymm11 vmovupd 1344( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1600(<input_0=%rdi),>x6=%ymm12 vmovupd 1600( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1856(<input_0=%rdi),>x7=%ymm13 vmovupd 1856( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 
1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = 
x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 64 ] = x0 # asm 1: vmovupd <x0=reg256#10,64(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,64(<input_0=%rdi) vmovupd % ymm9, 64( % rdi) # qhasm: mem256[ input_0 + 320 ] = x1 # asm 1: vmovupd <x1=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 576 ] = x2 # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi) vmovupd % ymm14, 576( % rdi) # qhasm: mem256[ input_0 + 832 ] = x3 # asm 1: vmovupd <x3=reg256#11,832(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,832(<input_0=%rdi) vmovupd % ymm10, 832( % rdi) # qhasm: mem256[ input_0 + 1088 ] = x4 # asm 1: vmovupd <x4=reg256#12,1088(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1088(<input_0=%rdi) vmovupd % ymm11, 1088( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x5 # asm 1: vmovupd <x5=reg256#9,1344(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1344(<input_0=%rdi) vmovupd % ymm8, 1344( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x6 # asm 1: vmovupd <x6=reg256#13,1600(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1600(<input_0=%rdi) vmovupd % ymm12, 1600( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x7 # asm 1: vmovupd <x7=reg256#7,1856(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1856(<input_0=%rdi) vmovupd % ymm6, 1856( % rdi) # qhasm: x0 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6 vmovupd 96( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7 vmovupd 352( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8 vmovupd 608( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 864 ] # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9 vmovupd 864( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1120 ] # asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10 vmovupd 1120( % rdi), % 
ymm10

# qhasm: x5 = mem256[ input_0 + 1376 ]
vmovupd 1376(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1632 ]
vmovupd 1632(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1888 ]
vmovupd 1888(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6, %ymm0, %ymm14
# qhasm: 4x v10 = x4 << 32
vpsllq $32, %ymm10, %ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
vpsrlq $32, %ymm6, %ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10, %ymm1, %ymm10
# qhasm: x0 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7, %ymm0, %ymm10
# qhasm: 4x v10 = x5 << 32
vpsllq $32, %ymm11, %ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
vpsrlq $32, %ymm7, %ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11, %ymm1, %ymm11
# qhasm: x1 = v00 | v10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8, %ymm0, %ymm11
# qhasm: 4x v10 = x6 << 32
vpsllq $32, %ymm12, %ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
vpsrlq $32, %ymm8, %ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12, %ymm1, %ymm12
# qhasm: x2 = v00 | v10
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9, %ymm0, %ymm12
# qhasm: 4x v10 = x7 << 32
vpsllq $32, %ymm13, %ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
vpsrlq $32, %ymm9, %ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13, %ymm1, %ymm13
# qhasm: x3 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9, %ymm13, %ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14, %ymm2, %ymm13
# qhasm: 8x v10 = x2 << 16
vpslld $16, %ymm11, %ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
vpsrld $16, %ymm14, %ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11, %ymm3, %ymm11
# qhasm: x0 = v00 | v10
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10, %ymm2, %ymm14
# qhasm: 8x v10 = x3 << 16
vpslld $16, %ymm12, %ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
vpsrld $16, %ymm10, %ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12, %ymm3, %ymm12
# qhasm: x1 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6, %ymm2, %ymm12
# qhasm: 8x v10 = x6 << 16
vpslld $16, %ymm8, %ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
vpsrld $16, %ymm6, %ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8, %ymm3, %ymm8
# qhasm: x4 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7, %ymm2, %ymm8
# qhasm: 8x v10 = x7 << 16
vpslld $16, %ymm9, %ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
vpsrld $16, %ymm7, %ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9, %ymm3, %ymm9
# qhasm: x5 = v00 | v10
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7, %ymm9, %ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13, %ymm4, %ymm9
# qhasm: 16x v10 = x1 << 8
vpsllw $8, %ymm14, %ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
vpsrlw $8, %ymm13, %ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14, %ymm5, %ymm14
# qhasm: x0 = v00 | v10
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11, %ymm4, %ymm14
# qhasm: 16x v10 = x3 << 8
vpsllw $8, %ymm10, %ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
vpsrlw $8, %ymm11, %ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10, %ymm5, %ymm10
# qhasm: x2 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12, %ymm4, %ymm11
# qhasm: 16x v10 = x5 << 8
vpsllw $8, %ymm8, %ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
vpsrlw $8, %ymm12, %ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8, %ymm5, %ymm8
# qhasm: x4 = v00 | v10
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6, %ymm4, %ymm12
# qhasm: 16x v10 = x7 << 8
vpsllw $8, %ymm7, %ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
vpsrlw $8, %ymm6, %ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7, %ymm5, %ymm7
# qhasm: x6 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 96 ] = x0
vmovupd %ymm9, 96(%rdi)
# qhasm: mem256[ input_0 + 352 ] = x1
vmovupd %ymm13, 352(%rdi)
# qhasm: mem256[ input_0 + 608 ] = x2
vmovupd %ymm14, 608(%rdi)
# qhasm: mem256[ input_0 + 864 ] = x3
vmovupd %ymm10, 864(%rdi)
# qhasm: mem256[ input_0 + 1120 ] = x4
vmovupd %ymm11, 1120(%rdi)
# qhasm: mem256[ input_0 + 1376 ] = x5
vmovupd %ymm8, 1376(%rdi)
# qhasm: mem256[ input_0 + 1632 ] = x6
vmovupd %ymm12, 1632(%rdi)
# qhasm: mem256[ input_0 + 1888 ] = x7
vmovupd %ymm6, 1888(%rdi)
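# Editorial note (hedged): each six-instruction stanza above is the standard
# masked shift-and-OR butterfly used for bit-matrix transposition. Assuming
# mask0/mask1 still hold the 32-bit constants from consts.S (MASK5_0 =
# 0x00000000FFFFFFFF, MASK5_1 = 0xFFFFFFFF00000000; their load is outside
# this excerpt), one 32-bit stanza is equivalent to the following C on each
# 64-bit lane, for a pair of rows a and b:
#
#     t = (a & 0x00000000FFFFFFFFULL) | (b << 32);  /* new row a */
#     b = (a >> 32) | (b & 0xFFFFFFFF00000000ULL);  /* new row b */
#     a = t;
#
# The same column of eight 256-bit rows is then interleaved again at 16-bit
# granularity (mask2/mask3, vpslld/vpsrld $16) and at 8-bit granularity
# (mask4/mask5, vpsllw/vpsrlw $8) before being stored back. The next column
# repeats this for the rows at byte offsets 128, 384, ..., 1920.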
# qhasm: x0 = mem256[ input_0 + 128 ]
vmovupd 128(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 384 ]
vmovupd 384(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 640 ]
vmovupd 640(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 896 ]
vmovupd 896(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1152 ]
vmovupd 1152(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1408 ]
vmovupd 1408(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1664 ]
vmovupd 1664(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1920 ]
vmovupd 1920(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6, %ymm0, %ymm14
# qhasm: 4x v10 = x4 << 32
vpsllq $32, %ymm10, %ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
vpsrlq $32, %ymm6, %ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10, %ymm1, %ymm10
# qhasm: x0 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7, %ymm0, %ymm10
# qhasm: 4x v10 = x5 << 32
vpsllq $32, %ymm11, %ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
vpsrlq $32, %ymm7, %ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11, %ymm1, %ymm11
# qhasm: x1 = v00 | v10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8, %ymm0, %ymm11
# qhasm: 4x v10 = x6 << 32
vpsllq $32, %ymm12, %ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
vpsrlq $32, %ymm8, %ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12, %ymm1, %ymm12
# qhasm: x2 = v00 | v10
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9, %ymm0, %ymm12
# qhasm: 4x v10 = x7 << 32
vpsllq $32, %ymm13, %ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
vpsrlq $32, %ymm9, %ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13, %ymm1, %ymm13
# qhasm: x3 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9, %ymm13, %ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14, %ymm2, %ymm13
# qhasm: 8x v10 = x2 << 16
vpslld $16, %ymm11, %ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
vpsrld $16, %ymm14, %ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11, %ymm3, %ymm11
# qhasm: x0 = v00 | v10
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10, %ymm2, %ymm14
# qhasm: 8x v10 = x3 << 16
vpslld $16, %ymm12, %ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
vpsrld $16, %ymm10, %ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12, %ymm3, %ymm12
# qhasm: x1 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6, %ymm2, %ymm12
# qhasm: 8x v10 = x6 << 16
vpslld $16, %ymm8, %ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
vpsrld $16, %ymm6, %ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8, %ymm3, %ymm8
# qhasm: x4 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7, %ymm2, %ymm8
# qhasm: 8x v10 = x7 << 16
vpslld $16, %ymm9, %ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
vpsrld $16, %ymm7, %ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9, %ymm3, %ymm9
# qhasm: x5 = v00 | v10
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7, %ymm9, %ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13, %ymm4, %ymm9
# qhasm: 16x v10 = x1 << 8
vpsllw $8, %ymm14, %ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
vpsrlw $8, %ymm13, %ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14, %ymm5, %ymm14
# qhasm: x0 = v00 | v10
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11, %ymm4, %ymm14
# qhasm: 16x v10 = x3 << 8
vpsllw $8, %ymm10, %ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
vpsrlw $8, %ymm11, %ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10, %ymm5, %ymm10
# qhasm: x2 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12, %ymm4, %ymm11
# qhasm: 16x v10 = x5 << 8
vpsllw $8, %ymm8, %ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
vpsrlw $8, %ymm12, %ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8, %ymm5, %ymm8
# qhasm: x4 = v00 | v10
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6, %ymm4, %ymm12
# qhasm: 16x v10 = x7 << 8
vpsllw $8, %ymm7, %ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
vpsrlw $8, %ymm6, %ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7, %ymm5, %ymm7
# qhasm: x6 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 128 ] = x0
vmovupd %ymm9, 128(%rdi)
# qhasm: mem256[ input_0 + 384 ] = x1
vmovupd %ymm13, 384(%rdi)
# qhasm: mem256[ input_0 + 640 ] = x2
vmovupd %ymm14, 640(%rdi)
# qhasm: mem256[ input_0 + 896 ] = x3
vmovupd %ymm10, 896(%rdi)
# qhasm: mem256[ input_0 + 1152 ] = x4
vmovupd %ymm11, 1152(%rdi)
# qhasm: mem256[ input_0 + 1408 ] = x5
vmovupd %ymm8, 1408(%rdi)
# qhasm: mem256[ input_0 + 1664 ] = x6
vmovupd %ymm12, 1664(%rdi)
# qhasm: mem256[ input_0 + 1920 ] = x7
vmovupd %ymm6, 1920(%rdi)
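# Editorial note: the stanza sequence below repeats the same 32/16/8-bit
# butterfly, unchanged, for the column of rows at byte offsets 160, 416,
# ..., 1952.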
# qhasm: x0 = mem256[ input_0 + 160 ]
vmovupd 160(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 416 ]
vmovupd 416(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 672 ]
vmovupd 672(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 928 ]
vmovupd 928(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1184 ]
vmovupd 1184(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1440 ]
vmovupd 1440(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1696 ]
vmovupd 1696(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1952 ]
vmovupd 1952(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6, %ymm0, %ymm14
# qhasm: 4x v10 = x4 << 32
vpsllq $32, %ymm10, %ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
vpsrlq $32, %ymm6, %ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10, %ymm1, %ymm10
# qhasm: x0 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7, %ymm0, %ymm10
# qhasm: 4x v10 = x5 << 32
vpsllq $32, %ymm11, %ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
vpsrlq $32, %ymm7, %ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11, %ymm1, %ymm11
# qhasm: x1 = v00 | v10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8, %ymm0, %ymm11
# qhasm: 4x v10 = x6 << 32
vpsllq $32, %ymm12, %ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
vpsrlq $32, %ymm8, %ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12, %ymm1, %ymm12
# qhasm: x2 = v00 | v10
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9, %ymm0, %ymm12
# qhasm: 4x v10 = x7 << 32
vpsllq $32, %ymm13, %ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
vpsrlq $32, %ymm9, %ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13, %ymm1, %ymm13
# qhasm: x3 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9, %ymm13, %ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14, %ymm2, %ymm13
# qhasm: 8x v10 = x2 << 16
vpslld $16, %ymm11, %ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
vpsrld $16, %ymm14, %ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11, %ymm3, %ymm11
# qhasm: x0 = v00 | v10
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10, %ymm2, %ymm14
# qhasm: 8x v10 = x3 << 16
vpslld $16, %ymm12, %ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
vpsrld $16, %ymm10, %ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12, %ymm3, %ymm12
# qhasm: x1 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6, %ymm2, %ymm12
# qhasm: 8x v10 = x6 << 16
vpslld $16, %ymm8, %ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
vpsrld $16, %ymm6, %ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8, %ymm3, %ymm8
# qhasm: x4 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7, %ymm2, %ymm8
# qhasm: 8x v10 = x7 << 16
vpslld $16, %ymm9, %ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
vpsrld $16, %ymm7, %ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9, %ymm3, %ymm9
# qhasm: x5 = v00 | v10
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7, %ymm9, %ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13, %ymm4, %ymm9
# qhasm: 16x v10 = x1 << 8
vpsllw $8, %ymm14, %ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
vpsrlw $8, %ymm13, %ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14, %ymm5, %ymm14
# qhasm: x0 = v00 | v10
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11, %ymm4, %ymm14
# qhasm: 16x v10 = x3 << 8
vpsllw $8, %ymm10, %ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
vpsrlw $8, %ymm11, %ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10, %ymm5, %ymm10
# qhasm: x2 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12, %ymm4, %ymm11
# qhasm: 16x v10 = x5 << 8
vpsllw $8, %ymm8, %ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
vpsrlw $8, %ymm12, %ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8, %ymm5, %ymm8
# qhasm: x4 = v00 | v10
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6, %ymm4, %ymm12
# qhasm: 16x v10 = x7 << 8
vpsllw $8, %ymm7, %ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
vpsrlw $8, %ymm6, %ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7, %ymm5, %ymm7
# qhasm: x6 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 160 ] = x0
vmovupd %ymm9, 160(%rdi)
# qhasm: mem256[ input_0 + 416 ] = x1
vmovupd %ymm13, 416(%rdi)
# qhasm: mem256[ input_0 + 672 ] = x2
vmovupd %ymm14, 672(%rdi)
# qhasm: mem256[ input_0 + 928 ] = x3
vmovupd %ymm10, 928(%rdi)
# qhasm: mem256[ input_0 + 1184 ] = x4
vmovupd %ymm11, 1184(%rdi)
# qhasm: mem256[ input_0 + 1440 ] = x5
vmovupd %ymm8, 1440(%rdi)
# qhasm: mem256[ input_0 + 1696 ] = x6
vmovupd %ymm12, 1696(%rdi)
# qhasm: mem256[ input_0 + 1952 ] = x7
vmovupd %ymm6, 1952(%rdi)
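# Editorial note: same butterfly again for the column of rows at byte
# offsets 192, 448, ..., 1984.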
# qhasm: x0 = mem256[ input_0 + 192 ]
vmovupd 192(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 448 ]
vmovupd 448(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 704 ]
vmovupd 704(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 960 ]
vmovupd 960(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1216 ]
vmovupd 1216(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1472 ]
vmovupd 1472(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1728 ]
vmovupd 1728(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1984 ]
vmovupd 1984(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6, %ymm0, %ymm14
# qhasm: 4x v10 = x4 << 32
vpsllq $32, %ymm10, %ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
vpsrlq $32, %ymm6, %ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10, %ymm1, %ymm10
# qhasm: x0 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7, %ymm0, %ymm10
# qhasm: 4x v10 = x5 << 32
vpsllq $32, %ymm11, %ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
vpsrlq $32, %ymm7, %ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11, %ymm1, %ymm11
# qhasm: x1 = v00 | v10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8, %ymm0, %ymm11
# qhasm: 4x v10 = x6 << 32
vpsllq $32, %ymm12, %ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
vpsrlq $32, %ymm8, %ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12, %ymm1, %ymm12
# qhasm: x2 = v00 | v10
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9, %ymm0, %ymm12
# qhasm: 4x v10 = x7 << 32
vpsllq $32, %ymm13, %ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
vpsrlq $32, %ymm9, %ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13, %ymm1, %ymm13
# qhasm: x3 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9, %ymm13, %ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14, %ymm2, %ymm13
# qhasm: 8x v10 = x2 << 16
vpslld $16, %ymm11, %ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
vpsrld $16, %ymm14, %ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11, %ymm3, %ymm11
# qhasm: x0 = v00 | v10
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10, %ymm2, %ymm14
# qhasm: 8x v10 = x3 << 16
vpslld $16, %ymm12, %ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
vpsrld $16, %ymm10, %ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12, %ymm3, %ymm12
# qhasm: x1 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6, %ymm2, %ymm12
# qhasm: 8x v10 = x6 << 16
vpslld $16, %ymm8, %ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
vpsrld $16, %ymm6, %ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8, %ymm3, %ymm8
# qhasm: x4 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7, %ymm2, %ymm8
# qhasm: 8x v10 = x7 << 16
vpslld $16, %ymm9, %ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
vpsrld $16, %ymm7, %ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9, %ymm3, %ymm9
# qhasm: x5 = v00 | v10
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7, %ymm9, %ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13, %ymm4, %ymm9
# qhasm: 16x v10 = x1 << 8
vpsllw $8, %ymm14, %ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
vpsrlw $8, %ymm13, %ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14, %ymm5, %ymm14
# qhasm: x0 = v00 | v10
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11, %ymm4, %ymm14
# qhasm: 16x v10 = x3 << 8
vpsllw $8, %ymm10, %ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
vpsrlw $8, %ymm11, %ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10, %ymm5, %ymm10
# qhasm: x2 = v00 | v10
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12, %ymm4, %ymm11
# qhasm: 16x v10 = x5 << 8
vpsllw $8, %ymm8, %ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
vpsrlw $8, %ymm12, %ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8, %ymm5, %ymm8
# qhasm: x4 = v00 | v10
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6, %ymm4, %ymm12
# qhasm: 16x v10 = x7 << 8
vpsllw $8, %ymm7, %ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
vpsrlw $8, %ymm6, %ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7, %ymm5, %ymm7
# qhasm: x6 = v00 | v10
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 192 ] = x0
vmovupd %ymm9, 192(%rdi)
# qhasm: mem256[ input_0 + 448 ] = x1
vmovupd %ymm13, 448(%rdi)
# qhasm: mem256[ input_0 + 704 ] = x2
vmovupd %ymm14, 704(%rdi)
# qhasm: mem256[ input_0 + 960 ] = x3
vmovupd %ymm10, 960(%rdi)
# qhasm: mem256[ input_0 + 1216 ] = x4
vmovupd %ymm11, 1216(%rdi)
# qhasm: mem256[ input_0 + 1472 ] = x5
vmovupd %ymm8, 1472(%rdi)
# qhasm: mem256[ input_0 + 1728 ] = x6
vmovupd %ymm12, 1728(%rdi)
# qhasm: mem256[ input_0 + 1984 ] = x7
vmovupd %ymm6, 1984(%rdi)
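# Editorial note: final column of this pass (rows at byte offsets 224, 480,
# ..., 2016). The temporaries below differ from the earlier columns because
# qhasm's register allocator starts reusing the mask registers
# (%ymm0-%ymm5) once each mask has been read for the last time in this pass.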
2016(<input_0=%rdi),>x7=%ymm13 vmovupd 2016( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#1 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm0 vpand % ymm9, % ymm0, % ymm0 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#13 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm12 vpsllq $32, % ymm13, % ymm12 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: x3 = v00 
| v10 # asm 1: vpor <v00=reg256#1,<v10=reg256#13,>x3=reg256#1 # asm 2: vpor <v00=%ymm0,<v10=%ymm12,>x3=%ymm0 vpor % ymm0, % ymm12, % ymm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1 vpor % ymm9, % ymm1, % ymm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9 vpand % ymm14, % ymm2, % ymm9 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#13 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm12 vpslld $16, % ymm11, % ymm12 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#14 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm13 vpsrld $16, % ymm14, % ymm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9 vpor % ymm9, % ymm12, % ymm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11 vpor % ymm13, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12 vpand % ymm10, % ymm2, % ymm12 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#1,>v10=reg256#14 # asm 2: vpslld $16,<x3=%ymm0,>v10=%ymm13 vpslld $16, % ymm0, % ymm13 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12 vpor % ymm12, % ymm13, % ymm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0 vpor % ymm10, % ymm0, % ymm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10 vpand % ymm6, % ymm2, % ymm10 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#14 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm13 vpslld $16, % ymm8, % ymm13 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10 vpor % ymm10, % ymm13, % ymm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#3 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm2 vpand % ymm7, % ymm2, % ymm2 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#2,>v10=reg256#9 # asm 2: vpslld $16,<x7=%ymm1,>v10=%ymm8 vpslld $16, % ymm1, % ymm8 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld 
$16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#3,<v10=reg256#9,>x5=reg256#3 # asm 2: vpor <v00=%ymm2,<v10=%ymm8,>x5=%ymm2 vpor % ymm2, % ymm8, % ymm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1 vpor % ymm7, % ymm1, % ymm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4 # asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3 vpand % ymm9, % ymm4, % ymm3 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#13,>v10=reg256#8 # asm 2: vpsllw $8,<x1=%ymm12,>v10=%ymm7 vpsllw $8, % ymm12, % ymm7 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#10,>v01=reg256#9 # asm 2: vpsrlw $8,<x0=%ymm9,>v01=%ymm8 vpsrlw $8, % ymm9, % ymm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10 # asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9 vpand % ymm12, % ymm5, % ymm9 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4 # asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3 vpor % ymm3, % ymm7, % ymm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8 # asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7 vpor % ymm8, % ymm9, % ymm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8 vpand % ymm11, % ymm4, % ymm8 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#1,>v10=reg256#10 # asm 2: vpsllw $8,<x3=%ymm0,>v10=%ymm9 vpsllw $8, % ymm0, % ymm9 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0 vpand % ymm0, % ymm5, % ymm0 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8 vpor % ymm8, % ymm9, % ymm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0 vpor % ymm11, % ymm0, % ymm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9 vpand % ymm10, % ymm4, % ymm9 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#3,>v10=reg256#12 # asm 2: vpsllw $8,<x5=%ymm2,>v10=%ymm11 vpsllw $8, % ymm2, % ymm11 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#11,>v01=reg256#11 # asm 2: vpsrlw $8,<x4=%ymm10,>v01=%ymm10 vpsrlw $8, % ymm10, % ymm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3 # asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2 vpand % ymm2, % ymm5, % ymm2 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9 vpor % ymm9, % ymm11, % ymm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3 # asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2 vpor % ymm10, % ymm2, % ymm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#5 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm4 vpand % ymm6, % ymm4, % ymm4 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#2,>v10=reg256#11 # asm 
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm4

# qhasm: 16x v10 = x7 << 8
vpsllw $8,%ymm1,%ymm10

# qhasm: 16x v01 = x6 unsigned>> 8
vpsrlw $8,%ymm6,%ymm6

# qhasm: v11 = x7 & mask5
vpand %ymm1,%ymm5,%ymm1

# qhasm: x6 = v00 | v10
vpor %ymm4,%ymm10,%ymm4

# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm1,%ymm1

# qhasm: mem256[ input_0 + 224 ] = x0
vmovupd %ymm3,224(%rdi)

# qhasm: mem256[ input_0 + 480 ] = x1
vmovupd %ymm7,480(%rdi)

# qhasm: mem256[ input_0 + 736 ] = x2
vmovupd %ymm8,736(%rdi)

# qhasm: mem256[ input_0 + 992 ] = x3
vmovupd %ymm0,992(%rdi)

# qhasm: mem256[ input_0 + 1248 ] = x4
vmovupd %ymm9,1248(%rdi)

# qhasm: mem256[ input_0 + 1504 ] = x5
vmovupd %ymm2,1504(%rdi)

# qhasm: mem256[ input_0 + 1760 ] = x6
vmovupd %ymm4,1760(%rdi)

# qhasm: mem256[ input_0 + 2016 ] = x7
vmovupd %ymm1,2016(%rdi)
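# The eight rows just produced are scattered back with a 256-byte stride
# (offsets 224, 480, ..., 2016). The code below then runs the bit-level
# rounds of the transpose on each group of eight 32-byte rows: using the
# MASK2_*, MASK1_* and MASK0_* constant pairs it exchanges 4-bit groups
# between rows four apart, 2-bit groups between rows two apart, and
# single bits between adjacent rows (mask/shift/or butterflies).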
# qhasm: mask0 aligned= mem256[ MASK2_0 ]
vmovapd MASK2_0(%rip),%ymm0

# qhasm: mask1 aligned= mem256[ MASK2_1 ]
vmovapd MASK2_1(%rip),%ymm1

# qhasm: mask2 aligned= mem256[ MASK1_0 ]
vmovapd MASK1_0(%rip),%ymm2

# qhasm: mask3 aligned= mem256[ MASK1_1 ]
vmovapd MASK1_1(%rip),%ymm3

# qhasm: mask4 aligned= mem256[ MASK0_0 ]
vmovapd MASK0_0(%rip),%ymm4

# qhasm: mask5 aligned= mem256[ MASK0_1 ]
vmovapd MASK0_1(%rip),%ymm5

# qhasm: x0 = mem256[ input_0 + 0 ]
vmovupd 0(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 32 ]
vmovupd 32(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 64 ]
vmovupd 64(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 96 ]
vmovupd 96(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 128 ]
vmovupd 128(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 160 ]
vmovupd 160(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 192 ]
vmovupd 192(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 224 ]
vmovupd 224(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14

# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6

# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6

# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10

# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7

# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7

# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10

# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8

# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8

# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12

# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9

# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9

# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9

# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13

# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14

# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14

# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14

# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10

# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10

# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10

# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12

# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6

# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6

# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12

# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8

# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7

# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7

# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8

# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7

# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9

# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15

# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13

# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13

# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9

# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13

# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14

# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15

# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11

# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm11,%ymm11

# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10

# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11

# qhasm: v10 = x5 & mask4
vpand %ymm8,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x4 & mask5
vpand %ymm12,%ymm5,%ymm12

# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm12,%ymm12

# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11

# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8

# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12

# qhasm: v10 = x7 & mask4
vpand %ymm7,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15

# qhasm: v01 = x6 & mask5
vpand %ymm6,%ymm5,%ymm6

# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm6,%ymm6

# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6

# qhasm: mem256[ input_0 + 0 ] = x0
vmovupd %ymm9,0(%rdi)

# qhasm: mem256[ input_0 + 32 ] = x1
vmovupd %ymm13,32(%rdi)

# qhasm: mem256[ input_0 + 64 ] = x2
vmovupd %ymm14,64(%rdi)

# qhasm: mem256[ input_0 + 96 ] = x3
vmovupd %ymm10,96(%rdi)

# qhasm: mem256[ input_0 + 128 ] = x4
vmovupd %ymm11,128(%rdi)

# qhasm: mem256[ input_0 + 160 ] = x5
vmovupd %ymm8,160(%rdi)

# qhasm: mem256[ input_0 + 192 ] = x6
vmovupd %ymm12,192(%rdi)

# qhasm: mem256[ input_0 + 224 ] = x7
vmovupd %ymm6,224(%rdi)
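# Same three butterfly rounds for the second group of eight rows
# (offsets 256..480).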
# qhasm: x0 = mem256[ input_0 + 256 ]
vmovupd 256(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 288 ]
vmovupd 288(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 320 ]
vmovupd 320(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 352 ]
vmovupd 352(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 384 ]
vmovupd 384(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 416 ]
vmovupd 416(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 448 ]
vmovupd 448(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 480 ]
vmovupd 480(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14

# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6

# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6

# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10

# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7

# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7

# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11

# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8

# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8

# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12

# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9

# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9

# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9

# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13

# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14

# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14

# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13

# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14

# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10

# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10

# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10

# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12

# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6

# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6

# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12

# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8

# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7

# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8

# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7

# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9

# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15

# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13

# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13

# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9

# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13

# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14

# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15

# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11

# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm11,%ymm11

# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10

# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11

# qhasm: v10 = x5 & mask4
vpand %ymm8,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x4 & mask5
vpand %ymm12,%ymm5,%ymm12

# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm12,%ymm12

# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11

# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8

# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12

# qhasm: v10 = x7 & mask4
vpand %ymm7,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15

# qhasm: v01 = x6 & mask5
vpand %ymm6,%ymm5,%ymm6

# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm6,%ymm6

# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6

# qhasm: mem256[ input_0 + 256 ] = x0
vmovupd %ymm9,256(%rdi)

# qhasm: mem256[ input_0 + 288 ] = x1
vmovupd %ymm13,288(%rdi)

# qhasm: mem256[ input_0 + 320 ] = x2
vmovupd %ymm14,320(%rdi)

# qhasm: mem256[ input_0 + 352 ] = x3
vmovupd %ymm10,352(%rdi)

# qhasm: mem256[ input_0 + 384 ] = x4
vmovupd %ymm11,384(%rdi)

# qhasm: mem256[ input_0 + 416 ] = x5
vmovupd %ymm8,416(%rdi)

# qhasm: mem256[ input_0 + 448 ] = x6
vmovupd %ymm12,448(%rdi)

# qhasm: mem256[ input_0 + 480 ] = x7
vmovupd %ymm6,480(%rdi)
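# Third group of eight rows (offsets 512..736), same rounds as above.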
# qhasm: x0 = mem256[ input_0 + 512 ]
vmovupd 512(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 544 ]
vmovupd 544(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 576 ]
vmovupd 576(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 608 ]
vmovupd 608(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 640 ]
vmovupd 640(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 672 ]
vmovupd 672(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 704 ]
vmovupd 704(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 736 ]
vmovupd 736(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14

# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6

# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6

# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10

# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7

# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7

# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11

# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8

# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8

# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12

# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9

# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9

# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9

# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13

# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14

# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14

# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13

# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14

# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10

# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10

# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10

# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12

# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6

# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6

# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12

# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8

# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15

# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15

# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9

# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7

# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8

# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7

# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9

# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15

# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13

# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13

# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9

# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13

# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14

# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15

# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11

# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm11,%ymm11

# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10

# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11

# qhasm: v10 = x5 & mask4
vpand %ymm8,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x4 & mask5
vpand %ymm12,%ymm5,%ymm12

# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm12,%ymm12

# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11

# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8

# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12

# qhasm: v10 = x7 & mask4
vpand %ymm7,%ymm4,%ymm15

# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15

# qhasm: v01 = x6 & mask5
vpand %ymm6,%ymm5,%ymm6

# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7

# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm6,%ymm6

# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6

# qhasm: mem256[ input_0 + 512 ] = x0
vmovupd %ymm9,512(%rdi)

# qhasm: mem256[ input_0 + 544 ] = x1
vmovupd %ymm13,544(%rdi)

# qhasm: mem256[ input_0 + 576 ] = x2
vmovupd %ymm14,576(%rdi)

# qhasm: mem256[ input_0 + 608 ] = x3
vmovupd %ymm10,608(%rdi)

# qhasm: mem256[ input_0 + 640 ] = x4
vmovupd %ymm11,640(%rdi)

# qhasm: mem256[ input_0 + 672 ] = x5
vmovupd %ymm8,672(%rdi)

# qhasm: mem256[ input_0 + 704 ] = x6
vmovupd %ymm12,704(%rdi)

# qhasm: mem256[ input_0 + 736 ] = x7
vmovupd %ymm6,736(%rdi)
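# Fourth group of eight rows (offsets 768..992); the same rounds
# continue past this point.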
# qhasm: x0 = mem256[ input_0 + 768 ]
vmovupd 768(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 800 ]
vmovupd 800(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 832 ]
vmovupd 832(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 864 ]
vmovupd 864(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 896 ]
vmovupd 896(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 928 ]
vmovupd 928(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 960 ]
vmovupd 960(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 992 ]
vmovupd 992(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14

# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6

# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6

# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10

# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15

# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15

# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7

# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11

# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7

# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
<v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # 
asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % 
ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & 
mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 768 ] = x0 # asm 1: vmovupd <x0=reg256#10,768(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,768(<input_0=%rdi) vmovupd % ymm9, 768( % rdi) # qhasm: mem256[ input_0 + 800 ] = x1 # asm 1: vmovupd <x1=reg256#14,800(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,800(<input_0=%rdi) vmovupd % ymm13, 800( % rdi) # qhasm: mem256[ input_0 + 832 ] = x2 # asm 1: vmovupd <x2=reg256#15,832(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,832(<input_0=%rdi) vmovupd % ymm14, 832( % rdi) # qhasm: mem256[ input_0 + 864 ] = x3 # asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi) vmovupd % ymm10, 864( % rdi) # qhasm: mem256[ input_0 + 896 ] = x4 # asm 1: vmovupd <x4=reg256#12,896(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,896(<input_0=%rdi) vmovupd % ymm11, 896( % rdi) # qhasm: mem256[ input_0 + 928 ] = x5 # asm 1: vmovupd <x5=reg256#9,928(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,928(<input_0=%rdi) vmovupd % ymm8, 928( % rdi) # qhasm: mem256[ input_0 + 960 ] = x6 # asm 1: vmovupd <x6=reg256#13,960(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,960(<input_0=%rdi) vmovupd % ymm12, 960( % rdi) # qhasm: mem256[ input_0 + 992 ] = x7 # asm 1: 
vmovupd %ymm6,992(%rdi) # mem256[ input_0 + 992 ] = x7
vmovupd 1024(%rdi),%ymm6 # x0 = mem256[ input_0 + 1024 ]
vmovupd 1056(%rdi),%ymm7 # x1 = mem256[ input_0 + 1056 ]
vmovupd 1088(%rdi),%ymm8 # x2 = mem256[ input_0 + 1088 ]
vmovupd 1120(%rdi),%ymm9 # x3 = mem256[ input_0 + 1120 ]
vmovupd 1152(%rdi),%ymm10 # x4 = mem256[ input_0 + 1152 ]
vmovupd 1184(%rdi),%ymm11 # x5 = mem256[ input_0 + 1184 ]
vmovupd 1216(%rdi),%ymm12 # x6 = mem256[ input_0 + 1216 ]
vmovupd 1248(%rdi),%ymm13 # x7 = mem256[ input_0 + 1248 ]
vpand %ymm6,%ymm0,%ymm14 # v00 = x0 & mask0
vpand %ymm10,%ymm0,%ymm15 # v10 = x4 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm6,%ymm1,%ymm6 # v01 = x0 & mask1
vpand %ymm10,%ymm1,%ymm10 # v11 = x4 & mask1
vpsrlq $4,%ymm6,%ymm6 # 4x v01 unsigned>>= 4
vpor %ymm14,%ymm15,%ymm14 # x0 = v00 | v10
vpor %ymm6,%ymm10,%ymm6 # x4 = v01 | v11
vpand %ymm7,%ymm0,%ymm10 # v00 = x1 & mask0
vpand %ymm11,%ymm0,%ymm15 # v10 = x5 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm7,%ymm1,%ymm7 # v01 = x1 & mask1
vpand %ymm11,%ymm1,%ymm11 # v11 = x5 & mask1
vpsrlq $4,%ymm7,%ymm7 # 4x v01 unsigned>>= 4
vpor %ymm10,%ymm15,%ymm10 # x1 = v00 | v10
vpor %ymm7,%ymm11,%ymm7 # x5 = v01 | v11
vpand %ymm8,%ymm0,%ymm11 # v00 = x2 & mask0
vpand %ymm12,%ymm0,%ymm15 # v10 = x6 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm8,%ymm1,%ymm8 # v01 = x2 & mask1
vpand %ymm12,%ymm1,%ymm12 # v11 = x6 & mask1
vpsrlq $4,%ymm8,%ymm8 # 4x v01 unsigned>>= 4
vpor %ymm11,%ymm15,%ymm11 # x2 = v00 | v10
vpor %ymm8,%ymm12,%ymm8 # x6 = v01 | v11
vpand %ymm9,%ymm0,%ymm12 # v00 = x3 & mask0
vpand %ymm13,%ymm0,%ymm15 # v10 = x7 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm9,%ymm1,%ymm9 # v01 = x3 & mask1
vpand %ymm13,%ymm1,%ymm13 # v11 = x7 & mask1
vpsrlq $4,%ymm9,%ymm9 # 4x v01 unsigned>>= 4
vpor %ymm12,%ymm15,%ymm12 # x3 = v00 | v10
vpor %ymm9,%ymm13,%ymm9 # x7 = v01 | v11
vpand %ymm14,%ymm2,%ymm13 # v00 = x0 & mask2
vpand %ymm11,%ymm2,%ymm15 # v10 = x2 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm14,%ymm3,%ymm14 # v01 = x0 & mask3
vpand %ymm11,%ymm3,%ymm11 # v11 = x2 & mask3
vpsrlq $2,%ymm14,%ymm14 # 4x v01 unsigned>>= 2
vpor %ymm13,%ymm15,%ymm13 # x0 = v00 | v10
vpor %ymm14,%ymm11,%ymm11 # x2 = v01 | v11
vpand %ymm10,%ymm2,%ymm14 # v00 = x1 & mask2
vpand %ymm12,%ymm2,%ymm15 # v10 = x3 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm10,%ymm3,%ymm10 # v01 = x1 & mask3
vpand %ymm12,%ymm3,%ymm12 # v11 = x3 & mask3
vpsrlq $2,%ymm10,%ymm10 # 4x v01 unsigned>>= 2
vpor %ymm14,%ymm15,%ymm14 # x1 = v00 | v10
vpor %ymm10,%ymm12,%ymm10 # x3 = v01 | v11
vpand %ymm6,%ymm2,%ymm12 # v00 = x4 & mask2
vpand %ymm8,%ymm2,%ymm15 # v10 = x6 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm6,%ymm3,%ymm6 # v01 = x4 & mask3
vpand %ymm8,%ymm3,%ymm8 # v11 = x6 & mask3
vpsrlq $2,%ymm6,%ymm6 # 4x v01 unsigned>>= 2
vpor %ymm12,%ymm15,%ymm12 # x4 = v00 | v10
vpor %ymm6,%ymm8,%ymm6 # x6 = v01 | v11
vpand %ymm7,%ymm2,%ymm8 # v00 = x5 & mask2
vpand %ymm9,%ymm2,%ymm15 # v10 = x7 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm7,%ymm3,%ymm7 # v01 = x5 & mask3
vpand %ymm9,%ymm3,%ymm9 # v11 = x7 & mask3
vpsrlq $2,%ymm7,%ymm7 # 4x v01 unsigned>>= 2
vpor %ymm8,%ymm15,%ymm8 # x5 = v00 | v10
vpor %ymm7,%ymm9,%ymm7 # x7 = v01 | v11
vpand %ymm13,%ymm4,%ymm9 # v00 = x0 & mask4
vpand %ymm14,%ymm4,%ymm15 # v10 = x1 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm13,%ymm5,%ymm13 # v01 = x0 & mask5
vpand %ymm14,%ymm5,%ymm14 # v11 = x1 & mask5
vpsrlq $1,%ymm13,%ymm13 # 4x v01 unsigned>>= 1
vpor %ymm9,%ymm15,%ymm9 # x0 = v00 | v10
vpor %ymm13,%ymm14,%ymm13 # x1 = v01 | v11
vpand %ymm11,%ymm4,%ymm14 # v00 = x2 & mask4
vpand %ymm10,%ymm4,%ymm15 # v10 = x3 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm11,%ymm5,%ymm11 # v01 = x2 & mask5
vpand %ymm10,%ymm5,%ymm10 # v11 = x3 & mask5
vpsrlq $1,%ymm11,%ymm11 # 4x v01 unsigned>>= 1
vpor %ymm14,%ymm15,%ymm14 # x2 = v00 | v10
vpor %ymm11,%ymm10,%ymm10 # x3 = v01 | v11
vpand %ymm12,%ymm4,%ymm11 # v00 = x4 & mask4
vpand %ymm8,%ymm4,%ymm15 # v10 = x5 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm12,%ymm5,%ymm12 # v01 = x4 & mask5
vpand %ymm8,%ymm5,%ymm8 # v11 = x5 & mask5
vpsrlq $1,%ymm12,%ymm12 # 4x v01 unsigned>>= 1
vpor %ymm11,%ymm15,%ymm11 # x4 = v00 | v10
vpor %ymm12,%ymm8,%ymm8 # x5 = v01 | v11
vpand %ymm6,%ymm4,%ymm12 # v00 = x6 & mask4
vpand %ymm7,%ymm4,%ymm15 # v10 = x7 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm6,%ymm5,%ymm6 # v01 = x6 & mask5
vpand %ymm7,%ymm5,%ymm7 # v11 = x7 & mask5
vpsrlq $1,%ymm6,%ymm6 # 4x v01 unsigned>>= 1
vpor %ymm12,%ymm15,%ymm12 # x6 = v00 | v10
vpor %ymm6,%ymm7,%ymm6 # x7 = v01 | v11
vmovupd %ymm9,1024(%rdi) # mem256[ input_0 + 1024 ] = x0
vmovupd %ymm13,1056(%rdi) # mem256[ input_0 + 1056 ] = x1
vmovupd %ymm14,1088(%rdi) # mem256[ input_0 + 1088 ] = x2
vmovupd %ymm10,1120(%rdi) # mem256[ input_0 + 1120 ] = x3
vmovupd %ymm11,1152(%rdi) # mem256[ input_0 + 1152 ] = x4
vmovupd %ymm8,1184(%rdi) # mem256[ input_0 + 1184 ] = x5
vmovupd %ymm12,1216(%rdi) # mem256[ input_0 + 1216 ] = x6
vmovupd %ymm6,1248(%rdi) # mem256[ input_0 + 1248 ] = x7
vmovupd 1280(%rdi),%ymm6 # x0 = mem256[ input_0 + 1280 ]
vmovupd 1312(%rdi),%ymm7 # x1 = mem256[ input_0 + 1312 ]
vmovupd 1344(%rdi),%ymm8 # x2 = mem256[ input_0 + 1344 ]
vmovupd 1376(%rdi),%ymm9 # x3 = mem256[ input_0 + 1376 ]
vmovupd 1408(%rdi),%ymm10 # x4 = mem256[ input_0 + 1408 ]
vmovupd 1440(%rdi),%ymm11 # x5 = mem256[ input_0 + 1440 ]
vmovupd 1472(%rdi),%ymm12 # x6 = mem256[ input_0 + 1472 ]
vmovupd 1504(%rdi),%ymm13 # x7 = mem256[ input_0 + 1504 ]
vpand %ymm6,%ymm0,%ymm14 # v00 = x0 & mask0
vpand %ymm10,%ymm0,%ymm15 # v10 = x4 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm6,%ymm1,%ymm6 # v01 = x0 & mask1
vpand %ymm10,%ymm1,%ymm10 # v11 = x4 & mask1
vpsrlq $4,%ymm6,%ymm6 # 4x v01 unsigned>>= 4
vpor %ymm14,%ymm15,%ymm14 # x0 = v00 | v10
vpor %ymm6,%ymm10,%ymm6 # x4 = v01 | v11
vpand %ymm7,%ymm0,%ymm10 # v00 = x1 & mask0
vpand %ymm11,%ymm0,%ymm15 # v10 = x5 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm7,%ymm1,%ymm7 # v01 = x1 & mask1
vpand %ymm11,%ymm1,%ymm11 # v11 = x5 & mask1
vpsrlq $4,%ymm7,%ymm7 # 4x v01 unsigned>>= 4
vpor %ymm10,%ymm15,%ymm10 # x1 = v00 | v10
vpor %ymm7,%ymm11,%ymm7 # x5 = v01 | v11
vpand %ymm8,%ymm0,%ymm11 # v00 = x2 & mask0
vpand %ymm12,%ymm0,%ymm15 # v10 = x6 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm8,%ymm1,%ymm8 # v01 = x2 & mask1
vpand %ymm12,%ymm1,%ymm12 # v11 = x6 & mask1
vpsrlq $4,%ymm8,%ymm8 # 4x v01 unsigned>>= 4
vpor %ymm11,%ymm15,%ymm11 # x2 = v00 | v10
vpor %ymm8,%ymm12,%ymm8 # x6 = v01 | v11
vpand %ymm9,%ymm0,%ymm12 # v00 = x3 & mask0
vpand %ymm13,%ymm0,%ymm15 # v10 = x7 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm9,%ymm1,%ymm9 # v01 = x3 & mask1
vpand %ymm13,%ymm1,%ymm13 # v11 = x7 & mask1
vpsrlq $4,%ymm9,%ymm9 # 4x v01 unsigned>>= 4
vpor %ymm12,%ymm15,%ymm12 # x3 = v00 | v10
vpor %ymm9,%ymm13,%ymm9 # x7 = v01 | v11
vpand %ymm14,%ymm2,%ymm13 # v00 = x0 & mask2
vpand %ymm11,%ymm2,%ymm15 # v10 = x2 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm14,%ymm3,%ymm14 # v01 = x0 & mask3
vpand %ymm11,%ymm3,%ymm11 # v11 = x2 & mask3
vpsrlq $2,%ymm14,%ymm14 # 4x v01 unsigned>>= 2
vpor %ymm13,%ymm15,%ymm13 # x0 = v00 | v10
vpor %ymm14,%ymm11,%ymm11 # x2 = v01 | v11
vpand %ymm10,%ymm2,%ymm14 # v00 = x1 & mask2
vpand %ymm12,%ymm2,%ymm15 # v10 = x3 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm10,%ymm3,%ymm10 # v01 = x1 & mask3
vpand %ymm12,%ymm3,%ymm12 # v11 = x3 & mask3
vpsrlq $2,%ymm10,%ymm10 # 4x v01 unsigned>>= 2
vpor %ymm14,%ymm15,%ymm14 # x1 = v00 | v10
vpor %ymm10,%ymm12,%ymm10 # x3 = v01 | v11
vpand %ymm6,%ymm2,%ymm12 # v00 = x4 & mask2
vpand %ymm8,%ymm2,%ymm15 # v10 = x6 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm6,%ymm3,%ymm6 # v01 = x4 & mask3
vpand %ymm8,%ymm3,%ymm8 # v11 = x6 & mask3
vpsrlq $2,%ymm6,%ymm6 # 4x v01 unsigned>>= 2
vpor %ymm12,%ymm15,%ymm12 # x4 = v00 | v10
vpor %ymm6,%ymm8,%ymm6 # x6 = v01 | v11
vpand %ymm7,%ymm2,%ymm8 # v00 = x5 & mask2
vpand %ymm9,%ymm2,%ymm15 # v10 = x7 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm7,%ymm3,%ymm7 # v01 = x5 & mask3
vpand %ymm9,%ymm3,%ymm9 # v11 = x7 & mask3
vpsrlq $2,%ymm7,%ymm7 # 4x v01 unsigned>>= 2
vpor %ymm8,%ymm15,%ymm8 # x5 = v00 | v10
vpor %ymm7,%ymm9,%ymm7 # x7 = v01 | v11
vpand %ymm13,%ymm4,%ymm9 # v00 = x0 & mask4
vpand %ymm14,%ymm4,%ymm15 # v10 = x1 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm13,%ymm5,%ymm13 # v01 = x0 & mask5
vpand %ymm14,%ymm5,%ymm14 # v11 = x1 & mask5
vpsrlq $1,%ymm13,%ymm13 # 4x v01 unsigned>>= 1
vpor %ymm9,%ymm15,%ymm9 # x0 = v00 | v10
vpor %ymm13,%ymm14,%ymm13 # x1 = v01 | v11
vpand %ymm11,%ymm4,%ymm14 # v00 = x2 & mask4
vpand %ymm10,%ymm4,%ymm15 # v10 = x3 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm11,%ymm5,%ymm11 # v01 = x2 & mask5
vpand %ymm10,%ymm5,%ymm10 # v11 = x3 & mask5
vpsrlq $1,%ymm11,%ymm11 # 4x v01 unsigned>>= 1
vpor %ymm14,%ymm15,%ymm14 # x2 = v00 | v10
vpor %ymm11,%ymm10,%ymm10 # x3 = v01 | v11
vpand %ymm12,%ymm4,%ymm11 # v00 = x4 & mask4
vpand %ymm8,%ymm4,%ymm15 # v10 = x5 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm12,%ymm5,%ymm12 # v01 = x4 & mask5
vpand %ymm8,%ymm5,%ymm8 # v11 = x5 & mask5
vpsrlq $1,%ymm12,%ymm12 # 4x v01 unsigned>>= 1
vpor %ymm11,%ymm15,%ymm11 # x4 = v00 | v10
vpor %ymm12,%ymm8,%ymm8 # x5 = v01 | v11
vpand %ymm6,%ymm4,%ymm12 # v00 = x6 & mask4
vpand %ymm7,%ymm4,%ymm15 # v10 = x7 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm6,%ymm5,%ymm6 # v01 = x6 & mask5
vpand %ymm7,%ymm5,%ymm7 # v11 = x7 & mask5
vpsrlq $1,%ymm6,%ymm6 # 4x v01 unsigned>>= 1
vpor %ymm12,%ymm15,%ymm12 # x6 = v00 | v10
vpor %ymm6,%ymm7,%ymm6 # x7 = v01 | v11
vmovupd %ymm9,1280(%rdi) # mem256[ input_0 + 1280 ] = x0
vmovupd %ymm13,1312(%rdi) # mem256[ input_0 + 1312 ] = x1
vmovupd %ymm14,1344(%rdi) # mem256[ input_0 + 1344 ] = x2
vmovupd %ymm10,1376(%rdi) # mem256[ input_0 + 1376 ] = x3
vmovupd %ymm11,1408(%rdi) # mem256[ input_0 + 1408 ] = x4
vmovupd %ymm8,1440(%rdi) # mem256[ input_0 + 1440 ] = x5
vmovupd %ymm12,1472(%rdi) # mem256[ input_0 + 1472 ] = x6
vmovupd %ymm6,1504(%rdi) # mem256[ input_0 + 1504 ] = x7
vmovupd 1536(%rdi),%ymm6 # x0 = mem256[ input_0 + 1536 ]
vmovupd 1568(%rdi),%ymm7 # x1 = mem256[ input_0 + 1568 ]
vmovupd 1600(%rdi),%ymm8 # x2 = mem256[ input_0 + 1600 ]
vmovupd 1632(%rdi),%ymm9 # x3 = mem256[ input_0 + 1632 ]
vmovupd 1664(%rdi),%ymm10 # x4 = mem256[ input_0 + 1664 ]
vmovupd 1696(%rdi),%ymm11 # x5 = mem256[ input_0 + 1696 ]
vmovupd 1728(%rdi),%ymm12 # x6 = mem256[ input_0 + 1728 ]
vmovupd 1760(%rdi),%ymm13 # x7 = mem256[ input_0 + 1760 ]
vpand %ymm6,%ymm0,%ymm14 # v00 = x0 & mask0
vpand %ymm10,%ymm0,%ymm15 # v10 = x4 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm6,%ymm1,%ymm6 # v01 = x0 & mask1
vpand %ymm10,%ymm1,%ymm10 # v11 = x4 & mask1
vpsrlq $4,%ymm6,%ymm6 # 4x v01 unsigned>>= 4
vpor %ymm14,%ymm15,%ymm14 # x0 = v00 | v10
vpor %ymm6,%ymm10,%ymm6 # x4 = v01 | v11
vpand %ymm7,%ymm0,%ymm10 # v00 = x1 & mask0
vpand %ymm11,%ymm0,%ymm15 # v10 = x5 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm7,%ymm1,%ymm7 # v01 = x1 & mask1
vpand %ymm11,%ymm1,%ymm11 # v11 = x5 & mask1
vpsrlq $4,%ymm7,%ymm7 # 4x v01 unsigned>>= 4
vpor %ymm10,%ymm15,%ymm10 # x1 = v00 | v10
vpor %ymm7,%ymm11,%ymm7 # x5 = v01 | v11
vpand %ymm8,%ymm0,%ymm11 # v00 = x2 & mask0
vpand %ymm12,%ymm0,%ymm15 # v10 = x6 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm8,%ymm1,%ymm8 # v01 = x2 & mask1
vpand %ymm12,%ymm1,%ymm12 # v11 = x6 & mask1
vpsrlq $4,%ymm8,%ymm8 # 4x v01 unsigned>>= 4
vpor %ymm11,%ymm15,%ymm11 # x2 = v00 | v10
vpor %ymm8,%ymm12,%ymm8 # x6 = v01 | v11
vpand %ymm9,%ymm0,%ymm12 # v00 = x3 & mask0
vpand %ymm13,%ymm0,%ymm15 # v10 = x7 & mask0
vpsllq $4,%ymm15,%ymm15 # 4x v10 <<= 4
vpand %ymm9,%ymm1,%ymm9 # v01 = x3 & mask1
vpand %ymm13,%ymm1,%ymm13 # v11 = x7 & mask1
vpsrlq $4,%ymm9,%ymm9 # 4x v01 unsigned>>= 4
vpor %ymm12,%ymm15,%ymm12 # x3 = v00 | v10
vpor %ymm9,%ymm13,%ymm9 # x7 = v01 | v11
vpand %ymm14,%ymm2,%ymm13 # v00 = x0 & mask2
vpand %ymm11,%ymm2,%ymm15 # v10 = x2 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm14,%ymm3,%ymm14 # v01 = x0 & mask3
vpand %ymm11,%ymm3,%ymm11 # v11 = x2 & mask3
vpsrlq $2,%ymm14,%ymm14 # 4x v01 unsigned>>= 2
vpor %ymm13,%ymm15,%ymm13 # x0 = v00 | v10
vpor %ymm14,%ymm11,%ymm11 # x2 = v01 | v11
vpand %ymm10,%ymm2,%ymm14 # v00 = x1 & mask2
vpand %ymm12,%ymm2,%ymm15 # v10 = x3 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm10,%ymm3,%ymm10 # v01 = x1 & mask3
vpand %ymm12,%ymm3,%ymm12 # v11 = x3 & mask3
vpsrlq $2,%ymm10,%ymm10 # 4x v01 unsigned>>= 2
vpor %ymm14,%ymm15,%ymm14 # x1 = v00 | v10
vpor %ymm10,%ymm12,%ymm10 # x3 = v01 | v11
vpand %ymm6,%ymm2,%ymm12 # v00 = x4 & mask2
vpand %ymm8,%ymm2,%ymm15 # v10 = x6 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm6,%ymm3,%ymm6 # v01 = x4 & mask3
vpand %ymm8,%ymm3,%ymm8 # v11 = x6 & mask3
vpsrlq $2,%ymm6,%ymm6 # 4x v01 unsigned>>= 2
vpor %ymm12,%ymm15,%ymm12 # x4 = v00 | v10
vpor %ymm6,%ymm8,%ymm6 # x6 = v01 | v11
vpand %ymm7,%ymm2,%ymm8 # v00 = x5 & mask2
vpand %ymm9,%ymm2,%ymm15 # v10 = x7 & mask2
vpsllq $2,%ymm15,%ymm15 # 4x v10 <<= 2
vpand %ymm7,%ymm3,%ymm7 # v01 = x5 & mask3
vpand %ymm9,%ymm3,%ymm9 # v11 = x7 & mask3
vpsrlq $2,%ymm7,%ymm7 # 4x v01 unsigned>>= 2
vpor %ymm8,%ymm15,%ymm8 # x5 = v00 | v10
vpor %ymm7,%ymm9,%ymm7 # x7 = v01 | v11
vpand %ymm13,%ymm4,%ymm9 # v00 = x0 & mask4
vpand %ymm14,%ymm4,%ymm15 # v10 = x1 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm13,%ymm5,%ymm13 # v01 = x0 & mask5
vpand %ymm14,%ymm5,%ymm14 # v11 = x1 & mask5
vpsrlq $1,%ymm13,%ymm13 # 4x v01 unsigned>>= 1
vpor %ymm9,%ymm15,%ymm9 # x0 = v00 | v10
vpor %ymm13,%ymm14,%ymm13 # x1 = v01 | v11
vpand %ymm11,%ymm4,%ymm14 # v00 = x2 & mask4
vpand %ymm10,%ymm4,%ymm15 # v10 = x3 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm11,%ymm5,%ymm11 # v01 = x2 & mask5
vpand %ymm10,%ymm5,%ymm10 # v11 = x3 & mask5
vpsrlq $1,%ymm11,%ymm11 # 4x v01 unsigned>>= 1
vpor %ymm14,%ymm15,%ymm14 # x2 = v00 | v10
vpor %ymm11,%ymm10,%ymm10 # x3 = v01 | v11
vpand %ymm12,%ymm4,%ymm11 # v00 = x4 & mask4
vpand %ymm8,%ymm4,%ymm15 # v10 = x5 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm12,%ymm5,%ymm12 # v01 = x4 & mask5
vpand %ymm8,%ymm5,%ymm8 # v11 = x5 & mask5
vpsrlq $1,%ymm12,%ymm12 # 4x v01 unsigned>>= 1
vpor %ymm11,%ymm15,%ymm11 # x4 = v00 | v10
vpor %ymm12,%ymm8,%ymm8 # x5 = v01 | v11
vpand %ymm6,%ymm4,%ymm12 # v00 = x6 & mask4
vpand %ymm7,%ymm4,%ymm15 # v10 = x7 & mask4
vpsllq $1,%ymm15,%ymm15 # 4x v10 <<= 1
vpand %ymm6,%ymm5,%ymm6 # v01 = x6 & mask5
vpand %ymm7,%ymm5,%ymm7 # v11 = x7 & mask5
vpsrlq $1,%ymm6,%ymm6 # 4x v01 unsigned>>= 1
vpor %ymm12,%ymm15,%ymm12 # x6 = v00 | v10
vpor %ymm6,%ymm7,%ymm6 # x7 = v01 | v11
vmovupd %ymm9,1536(%rdi) # mem256[ input_0 + 1536 ] = x0
vmovupd %ymm13,1568(%rdi) # mem256[ input_0 + 1568 ] = x1
# qhasm: mem256[ input_0 + 1600 ] = x2
# asm 1: vmovupd <x2=reg256#15,1600(<input_0=int64#1)
# asm 2: vmovupd 
<x2=%ymm14,1600(<input_0=%rdi) vmovupd % ymm14, 1600( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x3 # asm 1: vmovupd <x3=reg256#11,1632(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1632(<input_0=%rdi) vmovupd % ymm10, 1632( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x4 # asm 1: vmovupd <x4=reg256#12,1664(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1664(<input_0=%rdi) vmovupd % ymm11, 1664( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x5 # asm 1: vmovupd <x5=reg256#9,1696(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1696(<input_0=%rdi) vmovupd % ymm8, 1696( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6 # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi) vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1760 ] = x7 # asm 1: vmovupd <x7=reg256#7,1760(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1760(<input_0=%rdi) vmovupd % ymm6, 1760( % rdi) # qhasm: x0 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1792(<input_0=%rdi),>x0=%ymm6 vmovupd 1792( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1824(<input_0=%rdi),>x1=%ymm7 vmovupd 1824( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1856(<input_0=%rdi),>x2=%ymm8 vmovupd 1856( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1888(<input_0=%rdi),>x3=%ymm9 vmovupd 1888( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1920(<input_0=%rdi),>x4=%ymm10 vmovupd 1920( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1952(<input_0=%rdi),>x5=%ymm11 vmovupd 1952( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1984 ] # asm 1: vmovupd 1984(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1984(<input_0=%rdi),>x6=%ymm12 vmovupd 1984( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 2016 ] # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13 vmovupd 2016( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & 
mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#1 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm0 vpand % ymm13, % ymm0, % ymm0 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#1,<v10=reg256#1 # asm 2: vpsllq $4,<v10=%ymm0,<v10=%ymm0 vpsllq $4, % ymm0, % ymm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#1,>x3=reg256#1 # asm 2: vpor 
<v00=%ymm12,<v10=%ymm0,>x3=%ymm0 vpor % ymm12, % ymm0, % ymm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1 vpor % ymm9, % ymm1, % ymm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9 vpand % ymm14, % ymm2, % ymm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#13 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm12 vpand % ymm11, % ymm2, % ymm12 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#13,<v10=reg256#13 # asm 2: vpsllq $2,<v10=%ymm12,<v10=%ymm12 vpsllq $2, % ymm12, % ymm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#14 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm13 vpand % ymm14, % ymm3, % ymm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $2,<v01=%ymm13,<v01=%ymm13 vpsrlq $2, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9 vpor % ymm9, % ymm12, % ymm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11 vpor % ymm13, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12 vpand % ymm10, % ymm2, % ymm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#1,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x3=%ymm0,<mask2=%ymm2,>v10=%ymm13 vpand % ymm0, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12 vpor % ymm12, % ymm13, % ymm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0 vpor % ymm10, % ymm0, % ymm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10 vpand % ymm6, % ymm2, % ymm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm13 vpand % ymm8, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % 
ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10 vpor % ymm10, % ymm13, % ymm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#2,<mask2=reg256#3,>v10=reg256#3 # asm 2: vpand <x7=%ymm1,<mask2=%ymm2,>v10=%ymm2 vpand % ymm1, % ymm2, % ymm2 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#3,<v10=reg256#3 # asm 2: vpsllq $2,<v10=%ymm2,<v10=%ymm2 vpsllq $2, % ymm2, % ymm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#3,>x5=reg256#3 # asm 2: vpor <v00=%ymm8,<v10=%ymm2,>x5=%ymm2 vpor % ymm8, % ymm2, % ymm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1 vpor % ymm7, % ymm1, % ymm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4 # asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3 vpand % ymm9, % ymm4, % ymm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#13,<mask4=reg256#5,>v10=reg256#8 # asm 2: vpand <x1=%ymm12,<mask4=%ymm4,>v10=%ymm7 vpand % ymm12, % ymm4, % ymm7 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#8,<v10=reg256#8 # asm 2: vpsllq $1,<v10=%ymm7,<v10=%ymm7 vpsllq $1, % ymm7, % ymm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#10,<mask5=reg256#6,>v01=reg256#9 # asm 2: vpand <x0=%ymm9,<mask5=%ymm5,>v01=%ymm8 vpand % ymm9, % ymm5, % ymm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10 # asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9 vpand % ymm12, % ymm5, % ymm9 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $1,<v01=%ymm8,<v01=%ymm8 vpsrlq $1, % ymm8, % ymm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4 # asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3 vpor % ymm3, % ymm7, % ymm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8 # asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7 vpor % ymm8, % ymm9, % ymm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8 vpand % ymm11, % ymm4, % ymm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#1,<mask4=reg256#5,>v10=reg256#10 # asm 2: vpand <x3=%ymm0,<mask4=%ymm4,>v10=%ymm9 vpand % ymm0, % ymm4, % ymm9 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#10,<v10=reg256#10 # asm 2: vpsllq $1,<v10=%ymm9,<v10=%ymm9 vpsllq $1, % ymm9, % ymm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 
vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0 vpand % ymm0, % ymm5, % ymm0 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8 vpor % ymm8, % ymm9, % ymm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0 vpor % ymm11, % ymm0, % ymm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9 vpand % ymm10, % ymm4, % ymm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#3,<mask4=reg256#5,>v10=reg256#12 # asm 2: vpand <x5=%ymm2,<mask4=%ymm4,>v10=%ymm11 vpand % ymm2, % ymm4, % ymm11 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#12,<v10=reg256#12 # asm 2: vpsllq $1,<v10=%ymm11,<v10=%ymm11 vpsllq $1, % ymm11, % ymm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#11,<mask5=reg256#6,>v01=reg256#11 # asm 2: vpand <x4=%ymm10,<mask5=%ymm5,>v01=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3 # asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2 vpand % ymm2, % ymm5, % ymm2 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $1,<v01=%ymm10,<v01=%ymm10 vpsrlq $1, % ymm10, % ymm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9 vpor % ymm9, % ymm11, % ymm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3 # asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2 vpor % ymm10, % ymm2, % ymm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#11 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm10 vpand % ymm6, % ymm4, % ymm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#2,<mask4=reg256#5,>v10=reg256#5 # asm 2: vpand <x7=%ymm1,<mask4=%ymm4,>v10=%ymm4 vpand % ymm1, % ymm4, % ymm4 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#5,<v10=reg256#5 # asm 2: vpsllq $1,<v10=%ymm4,<v10=%ymm4 vpsllq $1, % ymm4, % ymm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1 vpand % ymm1, % ymm5, % ymm1 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#5,>x6=reg256#5 # asm 2: vpor <v00=%ymm10,<v10=%ymm4,>x6=%ymm4 vpor % ymm10, % ymm4, % ymm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1 vpor % ymm6, % ymm1, % ymm1 # qhasm: mem256[ input_0 + 1792 ] = x0 # asm 1: vmovupd <x0=reg256#4,1792(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm3,1792(<input_0=%rdi) vmovupd % ymm3, 1792( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x1 # asm 1: vmovupd <x1=reg256#8,1824(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm7,1824(<input_0=%rdi) vmovupd % ymm7, 1824( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x2 # asm 1: vmovupd 
<x2=reg256#9,1856(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm8,1856(<input_0=%rdi) vmovupd % ymm8, 1856( % rdi) # qhasm: mem256[ input_0 + 1888 ] = x3 # asm 1: vmovupd <x3=reg256#1,1888(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm0,1888(<input_0=%rdi) vmovupd % ymm0, 1888( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x4 # asm 1: vmovupd <x4=reg256#10,1920(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm9,1920(<input_0=%rdi) vmovupd % ymm9, 1920( % rdi) # qhasm: mem256[ input_0 + 1952 ] = x5 # asm 1: vmovupd <x5=reg256#3,1952(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm2,1952(<input_0=%rdi) vmovupd % ymm2, 1952( % rdi) # qhasm: mem256[ input_0 + 1984 ] = x6 # asm 1: vmovupd <x6=reg256#5,1984(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm4,1984(<input_0=%rdi) vmovupd % ymm4, 1984( % rdi) # qhasm: mem256[ input_0 + 2016 ] = x7 # asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi) vmovupd % ymm1, 2016( % rdi) # qhasm: return add % r11, % rsp ret
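The file above reduces to one primitive repeated at three granularities: mask off complementary bit groups from a register pair, shift the two halves toward each other, and OR them back together (the qhasm lines `v00 = x & mask0`, `4x v10 <<= 4`, `x = v00 | v10`, and so on). Below is a minimal C sketch of the 4-bit round, assuming a translation unit compiled with AVX2 support (`-mavx2`); the helper name `interleave_step4` is made up for illustration, and the nibble masks are hardcoded because the shift-by-4 semantics above only work losslessly if `mask0`/`mask1` select the low/high nibbles.

```c
/* Sketch only, not the PQClean source: the mask/shift/OR swap that the
 * qhasm code above unrolls for every register pair at distance 4. */
#include <immintrin.h>

static void interleave_step4(__m256i *lo, __m256i *hi)
{
    const __m256i mask0 = _mm256_set1_epi8(0x0F);       /* low nibbles  */
    const __m256i mask1 = _mm256_set1_epi8((char)0xF0); /* high nibbles */

    __m256i v00 = _mm256_and_si256(*lo, mask0);                       /* v00 = x & mask0        */
    __m256i v10 = _mm256_slli_epi64(_mm256_and_si256(*hi, mask0), 4); /* v10 = (y & mask0) << 4 */
    __m256i v01 = _mm256_srli_epi64(_mm256_and_si256(*lo, mask1), 4); /* v01 = (x & mask1) >> 4 */
    __m256i v11 = _mm256_and_si256(*hi, mask1);                       /* v11 = y & mask1        */

    *lo = _mm256_or_si256(v00, v10);  /* x = v00 | v10 */
    *hi = _mm256_or_si256(v01, v11);  /* y = v01 | v11 */
}
```

The rounds with shift 2 and shift 1 have exactly the same shape; for them the swap is only lossless with the 0x33.../0xCC... and 0x55.../0xAA... mask pairs. Pairing registers at distance 4, 2, 1 while shifting by 4, 2, 1 is the standard delta-swap recursion for transposing bit matrices.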
mktmansour/MKT-KSA-Geolocation-Security
76,827
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128/avx2/vec256_maa_asm.S
#include "namespace.h" #define vec256_maa_asm CRYPTO_NAMESPACE(vec256_maa_asm) #define _vec256_maa_asm _CRYPTO_NAMESPACE(vec256_maa_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_maa_asm .p2align 5 .global _vec256_maa_asm .global vec256_maa_asm _vec256_maa_asm: vec256_maa_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: r12 = r12 ^ mem256[ input_0 + 384 ] # asm 1: vpxor 384(<input_0=int64#1),<r12=reg256#3,>r12=reg256#1 # asm 2: vpxor 384(<input_0=%rdi),<r12=%ymm2,>r12=%ymm0 vpxor 384( % rdi), % ymm2, % ymm0 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm0,384(<input_0=%rdi) vmovupd % ymm0, 384( % rdi) # qhasm: r12 = r12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#1,>r12=reg256#1 # asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm0,>r12=%ymm0 vpxor 384( % rsi), % 
ymm0, % ymm0 # qhasm: mem256[ input_1 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2) # asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi) vmovupd % ymm0, 384( % rsi) # qhasm: r11 = r11 ^ mem256[ input_0 + 352 ] # asm 1: vpxor 352(<input_0=int64#1),<r11=reg256#2,>r11=reg256#1 # asm 2: vpxor 352(<input_0=%rdi),<r11=%ymm1,>r11=%ymm0 vpxor 352( % rdi), % ymm1, % ymm0 # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm0,352(<input_0=%rdi) vmovupd % ymm0, 352( % rdi) # qhasm: r11 = r11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#1,>r11=reg256#1 # asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm0,>r11=%ymm0 vpxor 352( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2) # asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi) vmovupd % ymm0, 352( % rsi) # qhasm: r10 = r10 ^ mem256[ input_0 + 320 ] # asm 1: vpxor 320(<input_0=int64#1),<r10=reg256#14,>r10=reg256#1 # asm 2: vpxor 320(<input_0=%rdi),<r10=%ymm13,>r10=%ymm0 vpxor 320( % rdi), % ymm13, % ymm0 # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm0,320(<input_0=%rdi) vmovupd % ymm0, 320( % rdi) # qhasm: r10 = r10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#1,>r10=reg256#1 # asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm0,>r10=%ymm0 vpxor 320( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2) # asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi) vmovupd % ymm0, 320( % rsi) # qhasm: r9 = r9 ^ mem256[ input_0 + 288 ] # asm 1: vpxor 288(<input_0=int64#1),<r9=reg256#13,>r9=reg256#1 # asm 2: vpxor 288(<input_0=%rdi),<r9=%ymm12,>r9=%ymm0 vpxor 288( % rdi), % ymm12, % ymm0 # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm0,288(<input_0=%rdi) vmovupd % ymm0, 288( % rdi) # qhasm: r9 = r9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#1,>r9=reg256#1 # asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm0,>r9=%ymm0 vpxor 288( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2) # asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi) vmovupd % ymm0, 288( % rsi) # qhasm: r8 = r8 ^ mem256[ input_0 + 256 ] # asm 1: vpxor 256(<input_0=int64#1),<r8=reg256#12,>r8=reg256#1 # asm 2: vpxor 256(<input_0=%rdi),<r8=%ymm11,>r8=%ymm0 vpxor 256( % rdi), % ymm11, % ymm0 # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm0,256(<input_0=%rdi) vmovupd % ymm0, 256( % rdi) # qhasm: r8 = r8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#1,>r8=reg256#1 # asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm0,>r8=%ymm0 vpxor 256( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2) # asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi) vmovupd % ymm0, 256( % rsi) # qhasm: r7 = r7 ^ mem256[ input_0 + 224 ] # asm 1: vpxor 224(<input_0=int64#1),<r7=reg256#11,>r7=reg256#1 # asm 2: vpxor 224(<input_0=%rdi),<r7=%ymm10,>r7=%ymm0 vpxor 224( % rdi), % ymm10, % ymm0 # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm0,224(<input_0=%rdi) vmovupd % ymm0, 224( % rdi) # qhasm: r7 = r7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 
224(<input_1=int64#2),<r7=reg256#1,>r7=reg256#1 # asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm0,>r7=%ymm0 vpxor 224( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2) # asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi) vmovupd % ymm0, 224( % rsi) # qhasm: r6 = r6 ^ mem256[ input_0 + 192 ] # asm 1: vpxor 192(<input_0=int64#1),<r6=reg256#10,>r6=reg256#1 # asm 2: vpxor 192(<input_0=%rdi),<r6=%ymm9,>r6=%ymm0 vpxor 192( % rdi), % ymm9, % ymm0 # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm0,192(<input_0=%rdi) vmovupd % ymm0, 192( % rdi) # qhasm: r6 = r6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#1,>r6=reg256#1 # asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm0,>r6=%ymm0 vpxor 192( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2) # asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi) vmovupd % ymm0, 192( % rsi) # qhasm: r5 = r5 ^ mem256[ input_0 + 160 ] # asm 1: vpxor 160(<input_0=int64#1),<r5=reg256#9,>r5=reg256#1 # asm 2: vpxor 160(<input_0=%rdi),<r5=%ymm8,>r5=%ymm0 vpxor 160( % rdi), % ymm8, % ymm0 # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm0,160(<input_0=%rdi) vmovupd % ymm0, 160( % rdi) # qhasm: r5 = r5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#1,>r5=reg256#1 # asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm0,>r5=%ymm0 vpxor 160( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2) # asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi) vmovupd % ymm0, 160( % rsi) # qhasm: r4 = r4 ^ mem256[ input_0 + 128 ] # asm 1: vpxor 128(<input_0=int64#1),<r4=reg256#8,>r4=reg256#1 # asm 2: vpxor 128(<input_0=%rdi),<r4=%ymm7,>r4=%ymm0 vpxor 128( % rdi), % ymm7, % ymm0 # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm0,128(<input_0=%rdi) vmovupd % ymm0, 128( % rdi) # qhasm: r4 = r4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<r4=reg256#1,>r4=reg256#1 # asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm0,>r4=%ymm0 vpxor 128( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2) # asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi) vmovupd % ymm0, 128( % rsi) # qhasm: r3 = r3 ^ mem256[ input_0 + 96 ] # asm 1: vpxor 96(<input_0=int64#1),<r3=reg256#7,>r3=reg256#1 # asm 2: vpxor 96(<input_0=%rdi),<r3=%ymm6,>r3=%ymm0 vpxor 96( % rdi), % ymm6, % ymm0 # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm0,96(<input_0=%rdi) vmovupd % ymm0, 96( % rdi) # qhasm: r3 = r3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#1,>r3=reg256#1 # asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm0,>r3=%ymm0 vpxor 96( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2) # asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi) vmovupd % ymm0, 96( % rsi) # qhasm: r2 = r2 ^ mem256[ input_0 + 64 ] # asm 1: vpxor 64(<input_0=int64#1),<r2=reg256#6,>r2=reg256#1 # asm 2: vpxor 64(<input_0=%rdi),<r2=%ymm5,>r2=%ymm0 vpxor 64( % rdi), % ymm5, % ymm0 # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: r2 = r2 ^ mem256[ input_1 + 64 ] # asm 1: 
vpxor 64(<input_1=int64#2),<r2=reg256#1,>r2=reg256#1
# asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm0,>r2=%ymm0
vpxor 64( % rsi), % ymm0, % ymm0

# qhasm: mem256[ input_1 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2)
# asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi)
vmovupd % ymm0, 64( % rsi)

# qhasm: r1 = r1 ^ mem256[ input_0 + 32 ]
# asm 1: vpxor 32(<input_0=int64#1),<r1=reg256#5,>r1=reg256#1
# asm 2: vpxor 32(<input_0=%rdi),<r1=%ymm4,>r1=%ymm0
vpxor 32( % rdi), % ymm4, % ymm0

# qhasm: mem256[ input_0 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#1,32(<input_0=int64#1)
# asm 2: vmovupd <r1=%ymm0,32(<input_0=%rdi)
vmovupd % ymm0, 32( % rdi)

# qhasm: r1 = r1 ^ mem256[ input_1 + 32 ]
# asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#1,>r1=reg256#1
# asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm0,>r1=%ymm0
vpxor 32( % rsi), % ymm0, % ymm0

# qhasm: mem256[ input_1 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2)
# asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi)
vmovupd % ymm0, 32( % rsi)

# qhasm: r0 = r0 ^ mem256[ input_0 + 0 ]
# asm 1: vpxor 0(<input_0=int64#1),<r0=reg256#4,>r0=reg256#1
# asm 2: vpxor 0(<input_0=%rdi),<r0=%ymm3,>r0=%ymm0
vpxor 0( % rdi), % ymm3, % ymm0

# qhasm: mem256[ input_0 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#1,0(<input_0=int64#1)
# asm 2: vmovupd <r0=%ymm0,0(<input_0=%rdi)
vmovupd % ymm0, 0( % rdi)

# qhasm: r0 = r0 ^ mem256[ input_1 + 0 ]
# asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#1,>r0=reg256#1
# asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm0,>r0=%ymm0
vpxor 0( % rsi), % ymm0, % ymm0

# qhasm: mem256[ input_1 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2)
# asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi)
vmovupd % ymm0, 0( % rsi)

# qhasm: return
add % r11, % rsp
ret
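The unrolled qhasm above is a bitsliced multiply-accumulate: each operand is thirteen 256-bit limbs (byte offsets 0 through 384 in steps of 32), every vpand/vpxor stanza contributes one partial product r[i+j] ^= a[i] & b[j], and fold stanzas such as r20 into r11, r10, r8 and r7 reduce the degree-24 carry-less product by x^13 + x^4 + x^3 + x + 1 (the fold offsets k-13, k-12, k-10, k-9 pin down that polynomial). The two chained XOR-store passes at the end write input_0 ^= product and then input_1 ^= the updated input_0. What follows is a minimal C sketch of that pattern, not the project's code: the name gf13_mul_acc and its signature are hypothetical, and uint64_t stands in for the 256-bit ymm lanes, so each word carries 64 field elements instead of 256.

#include <stdint.h>

#define GFBITS 13 /* limb count inferred from the 0..384 byte offsets */

/* Bitsliced multiply-accumulate over GF(2^13) = GF(2)[x]/(x^13+x^4+x^3+x+1).
   Limb i of a[] (resp. b[]) holds bit i of a batch of independent field
   elements; the assembly uses 256 elements per batch, this sketch uses 64. */
static void gf13_mul_acc(uint64_t out0[GFBITS], uint64_t out1[GFBITS],
                         const uint64_t a[GFBITS], const uint64_t b[GFBITS])
{
    uint64_t r[2 * GFBITS - 1] = {0};
    int i, j, k;

    /* schoolbook carry-less product: one AND/XOR stanza per (i, j) pair */
    for (i = 0; i < GFBITS; i++)
        for (j = 0; j < GFBITS; j++)
            r[i + j] ^= a[i] & b[j];

    /* reduction: x^k == x^(k-9) + x^(k-10) + x^(k-12) + x^(k-13) for k >= 13.
       The assembly folds each r[k] as soon as it is complete (r24 first,
       then r23, ...); doing it in a separate pass is equivalent because
       every operation is linear over GF(2). */
    for (k = 2 * GFBITS - 2; k >= GFBITS; k--) {
        r[k - 9]  ^= r[k];
        r[k - 10] ^= r[k];
        r[k - 12] ^= r[k];
        r[k - 13] ^= r[k];
    }

    /* the trailing store stanzas: input_0 ^= product, input_1 ^= new input_0 */
    for (i = 0; i < GFBITS; i++) {
        out0[i] ^= r[i];
        out1[i] ^= out0[i];
    }
}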
mktmansour/MKT-KSA-Geolocation-Security
76,935
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128f/avx2/vec256_ama_asm.S
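The next record, vec256_ama_asm.S from the mceliece8192128f AVX2 code, is generated by the same qhasm toolchain and shares the schoolbook rows and reduction folds above. Its prologue differs in one step: each limb a_i is first loaded as mem[input_0 + 32*i] ^ mem[input_1 + 32*i] and written back to input_0 before any partial products are formed, i.e. an add (XOR) precedes the multiply-accumulate, which presumably is the "ama" (add-multiply-add) in the file name. Below is a hedged C sketch of just that entry pattern; the helper name is hypothetical and only mirrors what the visible stanzas do.

#include <stdint.h>

#define GFBITS 13

/* Mirrors the per-limb entry stanzas of vec256_ama_asm:
   a_i = in0_i ^ in1_i; in0_i = a_i. The a[] limbs then feed the same
   AND/XOR schoolbook rows and GF(2^13) reduction folds sketched above. */
static void vec256_ama_prologue(uint64_t in0[GFBITS],
                                const uint64_t in1[GFBITS],
                                uint64_t a[GFBITS])
{
    for (int i = 0; i < GFBITS; i++) {
        a[i] = in0[i] ^ in1[i]; /* add: XOR is addition in GF(2) */
        in0[i] = a[i];          /* written back, as in the vmovupd stores */
    }
}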
#include "namespace.h"
#define vec256_ama_asm CRYPTO_NAMESPACE(vec256_ama_asm)
#define _vec256_ama_asm _CRYPTO_NAMESPACE(vec256_ama_asm)

# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg256 a0
# qhasm: reg256 a1
# qhasm: reg256 a2
# qhasm: reg256 a3
# qhasm: reg256 a4
# qhasm: reg256 a5
# qhasm: reg256 a6
# qhasm: reg256 a7
# qhasm: reg256 a8
# qhasm: reg256 a9
# qhasm: reg256 a10
# qhasm: reg256 a11
# qhasm: reg256 a12
# qhasm: reg256 b0
# qhasm: reg256 b1
# qhasm: reg256 r0
# qhasm: reg256 r1
# qhasm: reg256 r2
# qhasm: reg256 r3
# qhasm: reg256 r4
# qhasm: reg256 r5
# qhasm: reg256 r6
# qhasm: reg256 r7
# qhasm: reg256 r8
# qhasm: reg256 r9
# qhasm: reg256 r10
# qhasm: reg256 r11
# qhasm: reg256 r12
# qhasm: reg256 r13
# qhasm: reg256 r14
# qhasm: reg256 r15
# qhasm: reg256 r16
# qhasm: reg256 r17
# qhasm: reg256 r18
# qhasm: reg256 r19
# qhasm: reg256 r20
# qhasm: reg256 r21
# qhasm: reg256 r22
# qhasm: reg256 r23
# qhasm: reg256 r24
# qhasm: reg256 r

# qhasm: enter vec256_ama_asm
.p2align 5
.global _vec256_ama_asm
.global vec256_ama_asm
_vec256_ama_asm:
vec256_ama_asm:
mov % rsp, % r11
and $31, % r11
add $0, % r11
sub % r11, % rsp

# qhasm: b0 = mem256[ input_2 + 0 ]
# asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1
# asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0
vmovupd 0( % rdx), % ymm0

# qhasm: a12 = mem256[ input_0 + 384 ]
# asm 1: vmovupd 384(<input_0=int64#1),>a12=reg256#2
# asm 2: vmovupd 384(<input_0=%rdi),>a12=%ymm1
vmovupd 384( % rdi), % ymm1

# qhasm: a12 = a12 ^ mem256[ input_1 + 384 ]
# asm 1: vpxor 384(<input_1=int64#2),<a12=reg256#2,>a12=reg256#2
# asm 2: vpxor 384(<input_1=%rsi),<a12=%ymm1,>a12=%ymm1
vpxor 384( % rsi), % ymm1, % ymm1

# qhasm: mem256[ input_0 + 384 ] = a12
# asm 1: vmovupd <a12=reg256#2,384(<input_0=int64#1)
# asm 2: vmovupd <a12=%ymm1,384(<input_0=%rdi)
vmovupd % ymm1, 384( % rdi)

# qhasm: r12 = a12 & b0
# asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3
# asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2
vpand % ymm1, % ymm0, % ymm2

# qhasm: r13 = a12 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4
# asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3
vpand 32( % rdx), % ymm1, % ymm3

# qhasm: r14 = a12 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5
# asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4
vpand 64( % rdx), % ymm1, % ymm4

# qhasm: r15 = a12 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6
# asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5
vpand 96( % rdx), % ymm1, % ymm5

# qhasm: r16 = a12 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7
# asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6
vpand 128( % rdx), % ymm1, % ymm6

# qhasm: r17 = a12 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8
# asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7
vpand 160( % rdx), % ymm1, % ymm7

# qhasm: r18 = a12 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9
# asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8
vpand 192( % rdx), % ymm1, % ymm8

# qhasm: r19 = a12 & mem256[input_2 + 224]
#
asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 = a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>a11=reg256#15 # asm 2: vmovupd 352(<input_0=%rdi),>a11=%ymm14 vmovupd 352( % rdi), % ymm14 # qhasm: a11 = a11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<a11=reg256#15,>a11=reg256#15 # asm 2: vpxor 352(<input_1=%rsi),<a11=%ymm14,>a11=%ymm14 vpxor 352( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 352 ] = a11 # asm 1: vmovupd <a11=reg256#15,352(<input_0=int64#1) # asm 2: vmovupd <a11=%ymm14,352(<input_0=%rdi) vmovupd % ymm14, 352( % rdi) # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % 
ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>a10=reg256#15 # asm 2: vmovupd 320(<input_0=%rdi),>a10=%ymm14 vmovupd 320( % rdi), % ymm14 # qhasm: a10 = a10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<a10=reg256#15,>a10=reg256#15 # asm 2: vpxor 320(<input_1=%rsi),<a10=%ymm14,>a10=%ymm14 vpxor 320( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 320 ] = a10 # asm 1: vmovupd <a10=reg256#15,320(<input_0=int64#1) # asm 2: vmovupd <a10=%ymm14,320(<input_0=%rdi) vmovupd % ymm14, 320( % rdi) # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>a9=reg256#15 # asm 2: vmovupd 288(<input_0=%rdi),>a9=%ymm14 vmovupd 288( % rdi), % ymm14 # qhasm: a9 = a9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<a9=reg256#15,>a9=reg256#15 # asm 2: vpxor 288(<input_1=%rsi),<a9=%ymm14,>a9=%ymm14 vpxor 288( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 288 ] = a9 # asm 1: vmovupd <a9=reg256#15,288(<input_0=int64#1) # asm 2: vmovupd <a9=%ymm14,288(<input_0=%rdi) vmovupd % ymm14, 288( % rdi) # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & 
mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>a8=reg256#15 # asm 2: vmovupd 256(<input_0=%rdi),>a8=%ymm14 vmovupd 256( % rdi), % ymm14 # qhasm: a8 = a8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<a8=reg256#15,>a8=reg256#15 # asm 2: vpxor 256(<input_1=%rsi),<a8=%ymm14,>a8=%ymm14 vpxor 256( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 256 ] = a8 # asm 1: vmovupd <a8=reg256#15,256(<input_0=int64#1) # asm 2: vmovupd <a8=%ymm14,256(<input_0=%rdi) vmovupd % ymm14, 256( % rdi) # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 
192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>a7=reg256#15 # asm 2: vmovupd 224(<input_0=%rdi),>a7=%ymm14 vmovupd 224( % rdi), % ymm14 # qhasm: a7 = a7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 224(<input_1=int64#2),<a7=reg256#15,>a7=reg256#15 # asm 2: vpxor 224(<input_1=%rsi),<a7=%ymm14,>a7=%ymm14 vpxor 224( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 224 ] = a7 # asm 1: vmovupd <a7=reg256#15,224(<input_0=int64#1) # asm 2: vmovupd <a7=%ymm14,224(<input_0=%rdi) vmovupd % ymm14, 224( % rdi) # qhasm: r = a7 & b0 # asm 1: vpand 
<a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % 
rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>a6=reg256#15 # asm 2: vmovupd 192(<input_0=%rdi),>a6=%ymm14 vmovupd 192( % rdi), % ymm14 # qhasm: a6 = a6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<a6=reg256#15,>a6=reg256#15 # asm 2: vpxor 192(<input_1=%rsi),<a6=%ymm14,>a6=%ymm14 vpxor 192( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 192 ] = a6 # asm 1: vmovupd <a6=reg256#15,192(<input_0=int64#1) # asm 2: vmovupd <a6=%ymm14,192(<input_0=%rdi) vmovupd % ymm14, 192( % rdi) # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: 
vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_0 + 160 ] # asm 1: vmovupd 
160(<input_0=int64#1),>a5=reg256#15 # asm 2: vmovupd 160(<input_0=%rdi),>a5=%ymm14 vmovupd 160( % rdi), % ymm14 # qhasm: a5 = a5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<a5=reg256#15,>a5=reg256#15 # asm 2: vpxor 160(<input_1=%rsi),<a5=%ymm14,>a5=%ymm14 vpxor 160( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 160 ] = a5 # asm 1: vmovupd <a5=reg256#15,160(<input_0=int64#1) # asm 2: vmovupd <a5=%ymm14,160(<input_0=%rdi) vmovupd % ymm14, 160( % rdi) # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = 
a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>a4=reg256#15 # asm 2: vmovupd 128(<input_0=%rdi),>a4=%ymm14 vmovupd 128( % rdi), % ymm14 # qhasm: a4 = a4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<a4=reg256#15,>a4=reg256#15 # asm 2: vpxor 128(<input_1=%rsi),<a4=%ymm14,>a4=%ymm14 vpxor 128( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 128 ] = a4 # asm 1: vmovupd <a4=reg256#15,128(<input_0=int64#1) # asm 2: vmovupd <a4=%ymm14,128(<input_0=%rdi) vmovupd % ymm14, 128( % rdi) # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % 
ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>a3=reg256#15 # asm 2: vmovupd 96(<input_0=%rdi),>a3=%ymm14 vmovupd 96( % rdi), % ymm14 # qhasm: a3 = a3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<a3=reg256#15,>a3=reg256#15 # asm 2: vpxor 96(<input_1=%rsi),<a3=%ymm14,>a3=%ymm14 vpxor 96( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 96 ] = a3 # asm 1: vmovupd <a3=reg256#15,96(<input_0=int64#1) # asm 2: vmovupd <a3=%ymm14,96(<input_0=%rdi) vmovupd % ymm14, 96( % rdi) # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor 
<r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>a2=reg256#15 # asm 2: vmovupd 64(<input_0=%rdi),>a2=%ymm14 vmovupd 64( % rdi), % ymm14 # qhasm: a2 = a2 ^ mem256[ input_1 + 64 ] # asm 1: vpxor 64(<input_1=int64#2),<a2=reg256#15,>a2=reg256#15 # asm 2: vpxor 64(<input_1=%rsi),<a2=%ymm14,>a2=%ymm14 vpxor 64( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 64 ] = a2 # asm 1: vmovupd <a2=reg256#15,64(<input_0=int64#1) # asm 2: vmovupd <a2=%ymm14,64(<input_0=%rdi) vmovupd % ymm14, 64( % rdi) # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 
vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 
384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>a1=reg256#15 # asm 2: vmovupd 32(<input_0=%rdi),>a1=%ymm14 vmovupd 32( % rdi), % ymm14 # qhasm: a1 = a1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<a1=reg256#15,>a1=reg256#15 # asm 2: vpxor 32(<input_1=%rsi),<a1=%ymm14,>a1=%ymm14 vpxor 32( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 32 ] = a1 # asm 1: vmovupd <a1=reg256#15,32(<input_0=int64#1) # asm 2: vmovupd <a1=%ymm14,32(<input_0=%rdi) vmovupd % ymm14, 32( % rdi) # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), 
% ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>a0=reg256#15 # asm 2: vmovupd 0(<input_0=%rdi),>a0=%ymm14 vmovupd 0( % rdi), % ymm14 # qhasm: a0 = a0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<a0=reg256#15,>a0=reg256#15 # asm 2: vpxor 0(<input_1=%rsi),<a0=%ymm14,>a0=%ymm14 vpxor 0( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 0 ] = a0 # asm 1: vmovupd <a0=reg256#15,0(<input_0=int64#1) # asm 2: vmovupd <a0=%ymm14,0(<input_0=%rdi) vmovupd % ymm14, 0( % rdi) # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # 
asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3
vpxor %ymm0,%ymm3,%ymm3

# qhasm: r = a0 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 32(%rdx),%ymm14,%ymm0

# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4
vpxor %ymm0,%ymm4,%ymm4

# qhasm: r = a0 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 64(%rdx),%ymm14,%ymm0

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5
vpxor %ymm0,%ymm5,%ymm5

# qhasm: r = a0 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 96(%rdx),%ymm14,%ymm0

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6
vpxor %ymm0,%ymm6,%ymm6

# qhasm: r = a0 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 128(%rdx),%ymm14,%ymm0

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7
vpxor %ymm0,%ymm7,%ymm7

# qhasm: r = a0 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 160(%rdx),%ymm14,%ymm0

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8
vpxor %ymm0,%ymm8,%ymm8

# qhasm: r = a0 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 192(%rdx),%ymm14,%ymm0

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9
vpxor %ymm0,%ymm9,%ymm9

# qhasm: r = a0 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 224(%rdx),%ymm14,%ymm0

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10
vpxor %ymm0,%ymm10,%ymm10

# qhasm: r = a0 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 256(%rdx),%ymm14,%ymm0

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11
vpxor %ymm0,%ymm11,%ymm11

# qhasm: r = a0 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 288(%rdx),%ymm14,%ymm0

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12
vpxor %ymm0,%ymm12,%ymm12

# qhasm: r = a0 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 320(%rdx),%ymm14,%ymm0

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13
vpxor %ymm0,%ymm13,%ymm13

# qhasm: r = a0 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 352(%rdx),%ymm14,%ymm0

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1
vpxor %ymm0,%ymm1,%ymm1

# qhasm: r = a0 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 384(%rdx),%ymm14,%ymm0

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2
vpxor %ymm0,%ymm2,%ymm2

# qhasm: r12 = r12 ^ mem256[ input_1 + 384 ]
# asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#3,>r12=reg256#1
# asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm2,>r12=%ymm0
vpxor 384(%rsi),%ymm2,%ymm0

# qhasm: mem256[ input_1 + 384 ] = r12
# asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2)
# asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi)
vmovupd %ymm0,384(%rsi)

# qhasm: r11 = r11 ^ mem256[ input_1 + 352 ]
# asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#2,>r11=reg256#1
# asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm1,>r11=%ymm0
vpxor 352(%rsi),%ymm1,%ymm0

# qhasm: mem256[ input_1 + 352 ] = r11
# asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2)
# asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi)
vmovupd %ymm0,352(%rsi)

# qhasm: r10 = r10 ^ mem256[ input_1 + 320 ]
# asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#14,>r10=reg256#1
# asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm13,>r10=%ymm0
vpxor 320(%rsi),%ymm13,%ymm0

# qhasm: mem256[ input_1 + 320 ] = r10
# asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2)
# asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi)
vmovupd %ymm0,320(%rsi)

# qhasm: r9 = r9 ^ mem256[ input_1 + 288 ]
# asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#13,>r9=reg256#1
# asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm12,>r9=%ymm0
vpxor 288(%rsi),%ymm12,%ymm0

# qhasm: mem256[ input_1 + 288 ] = r9
# asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2)
# asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi)
vmovupd %ymm0,288(%rsi)

# qhasm: r8 = r8 ^ mem256[ input_1 + 256 ]
# asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#12,>r8=reg256#1
# asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm11,>r8=%ymm0
vpxor 256(%rsi),%ymm11,%ymm0

# qhasm: mem256[ input_1 + 256 ] = r8
# asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2)
# asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi)
vmovupd %ymm0,256(%rsi)

# qhasm: r7 = r7 ^ mem256[ input_1 + 224 ]
# asm 1: vpxor 224(<input_1=int64#2),<r7=reg256#11,>r7=reg256#1
# asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm10,>r7=%ymm0
vpxor 224(%rsi),%ymm10,%ymm0

# qhasm: mem256[ input_1 + 224 ] = r7
# asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2)
# asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi)
vmovupd %ymm0,224(%rsi)

# qhasm: r6 = r6 ^ mem256[ input_1 + 192 ]
# asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#10,>r6=reg256#1
# asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm9,>r6=%ymm0
vpxor 192(%rsi),%ymm9,%ymm0

# qhasm: mem256[ input_1 + 192 ] = r6
# asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2)
# asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi)
vmovupd %ymm0,192(%rsi)

# qhasm: r5 = r5 ^ mem256[ input_1 + 160 ]
# asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#9,>r5=reg256#1
# asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm8,>r5=%ymm0
vpxor 160(%rsi),%ymm8,%ymm0

# qhasm: mem256[ input_1 + 160 ] = r5
# asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2)
# asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi)
vmovupd %ymm0,160(%rsi)

# qhasm: r4 = r4 ^ mem256[ input_1 + 128 ]
# asm 1: vpxor 128(<input_1=int64#2),<r4=reg256#8,>r4=reg256#1
# asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm7,>r4=%ymm0
vpxor 128(%rsi),%ymm7,%ymm0

# qhasm: mem256[ input_1 + 128 ] = r4
# asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2)
# asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi)
vmovupd %ymm0,128(%rsi)

# qhasm: r3 = r3 ^ mem256[ input_1 + 96 ]
# asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#7,>r3=reg256#1
# asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm6,>r3=%ymm0
vpxor 96(%rsi),%ymm6,%ymm0

# qhasm: mem256[ input_1 + 96 ] = r3
# asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2)
# asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi)
vmovupd %ymm0,96(%rsi)

# qhasm: r2 = r2 ^ mem256[ input_1 + 64 ]
# asm 1: vpxor 64(<input_1=int64#2),<r2=reg256#6,>r2=reg256#1
# asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm5,>r2=%ymm0
vpxor 64(%rsi),%ymm5,%ymm0

# qhasm: mem256[ input_1 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2)
# asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi)
vmovupd %ymm0,64(%rsi)

# qhasm: r1 = r1 ^ mem256[ input_1 + 32 ]
# asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#5,>r1=reg256#1
# asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm4,>r1=%ymm0
vpxor 32(%rsi),%ymm4,%ymm0

# qhasm: mem256[ input_1 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2)
# asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi)
vmovupd %ymm0,32(%rsi)

# qhasm: r0 = r0 ^ mem256[ input_1 + 0 ]
# asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#4,>r0=reg256#1
# asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm3,>r0=%ymm0
vpxor 0(%rsi),%ymm3,%ymm0

# qhasm: mem256[ input_1 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2)
# asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi)
vmovupd %ymm0,0(%rsi)

# qhasm: return
add %r11,%rsp
ret
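
# ----------------------------------------------------------------------
# Note (added): the block above is the tail of a qhasm-generated AND/XOR
# accumulation (the carry-less multiply-accumulate pattern used by the
# bitsliced GF(2) vector routines in this package): the broadcast word
# a0 is ANDed with thirteen 256-bit rows of the operand at input_2,
# XORed into the accumulators r0..r12, and the accumulators are then
# XOR-folded into the destination at input_1. A minimal C-style sketch
# of the same pattern (variable names are illustrative, not from the
# source):
#
#   for (i = 0; i < 13; i++)  acc[i] ^= a0 & b[i];   /* vpand + vpxor   */
#   for (i = 12; i >= 0; i--) dst[i] ^= acc[i];      /* vpxor + vmovupd */
# ----------------------------------------------------------------------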
mktmansour/MKT-KSA-Geolocation-Security
262634
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128f/avx2/transpose_64x64_asm.S
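# ----------------------------------------------------------------------
# Note (added): the qhasm-generated file below transposes a 64x64 bit
# matrix in place using the classic log-step masked-swap network (cf.
# Hacker's Delight). For a shift s, a row x and its partner row y
# (s rows apart) exchange s-bit fields through a pair of masks; one
# step in C-style pseudocode (names are illustrative, not from the
# source):
#
#   x' = (x & m_lo) | (y << s);   /* m_lo = MASK*_0, low fields  */
#   y' = (x >> s)  | (y & m_hi);  /* m_hi = MASK*_1, high fields */
#
# The portion visible in this record loads MASK5, MASK4, and MASK3,
# i.e. the s = 32, 16, and 8 stages, operating on 64-bit rows
# replicated into 128-bit registers with movddup.
# ----------------------------------------------------------------------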
#include "namespace.h"

#define MASK0_0 CRYPTO_NAMESPACE(MASK0_0)
#define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0)
#define MASK0_1 CRYPTO_NAMESPACE(MASK0_1)
#define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1)
#define MASK1_0 CRYPTO_NAMESPACE(MASK1_0)
#define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0)
#define MASK1_1 CRYPTO_NAMESPACE(MASK1_1)
#define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1)
#define MASK2_0 CRYPTO_NAMESPACE(MASK2_0)
#define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0)
#define MASK2_1 CRYPTO_NAMESPACE(MASK2_1)
#define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1)
#define MASK3_0 CRYPTO_NAMESPACE(MASK3_0)
#define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0)
#define MASK3_1 CRYPTO_NAMESPACE(MASK3_1)
#define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1)
#define MASK4_0 CRYPTO_NAMESPACE(MASK4_0)
#define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0)
#define MASK4_1 CRYPTO_NAMESPACE(MASK4_1)
#define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1)
#define MASK5_0 CRYPTO_NAMESPACE(MASK5_0)
#define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0)
#define MASK5_1 CRYPTO_NAMESPACE(MASK5_1)
#define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1)
#define transpose_64x64_asm CRYPTO_NAMESPACE(transpose_64x64_asm)
#define _transpose_64x64_asm _CRYPTO_NAMESPACE(transpose_64x64_asm)

# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 r5
# qhasm: reg128 r6
# qhasm: reg128 r7
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 v00
# qhasm: reg128 v01
# qhasm: reg128 v10
# qhasm: reg128 v11
# qhasm: int64 buf
# qhasm: reg128 mask0
# qhasm: reg128 mask1
# qhasm: reg128 mask2
# qhasm: reg128 mask3
# qhasm: reg128 mask4
# qhasm: reg128 mask5

# qhasm: enter transpose_64x64_asm
.p2align 5
.global _transpose_64x64_asm
.global transpose_64x64_asm
_transpose_64x64_asm:
transpose_64x64_asm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp

# qhasm: mask0 aligned= mem128[ MASK5_0 ]
# asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0
movdqa MASK5_0(%rip),%xmm0

# qhasm: mask1 aligned= mem128[ MASK5_1 ]
# asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1
movdqa MASK5_1(%rip),%xmm1

# qhasm: mask2 aligned= mem128[ MASK4_0 ]
# asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2
movdqa MASK4_0(%rip),%xmm2

# qhasm: mask3 aligned= mem128[ MASK4_1 ]
# asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3
movdqa MASK4_1(%rip),%xmm3

# qhasm: mask4 aligned= mem128[ MASK3_0 ]
# asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4
movdqa MASK3_0(%rip),%xmm4

# qhasm: mask5 aligned= mem128[ MASK3_1 ]
# asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5
movdqa MASK3_1(%rip),%xmm5

# qhasm: r0 = mem64[ input_0 + 0 ] x2
# asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6
movddup 0(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 64 ] x2
# asm 1: movddup 64(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 64(<input_0=%rdi),>r1=%xmm7
movddup 64(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 128 ] x2
# asm 1: movddup
128(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 128(<input_0=%rdi),>r2=%xmm8 movddup 128( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 192(<input_0=%rdi),>r3=%xmm9 movddup 192( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 256(<input_0=%rdi),>r4=%xmm10 movddup 256( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 320(<input_0=%rdi),>r5=%xmm11 movddup 320( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 384(<input_0=%rdi),>r6=%xmm12 movddup 384( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 448(<input_0=%rdi),>r7=%xmm13 movddup 448( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # 
qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 0 ] = buf # asm 1: movq <buf=int64#2,0(<input_0=int64#1) # asm 2: movq <buf=%rsi,0(<input_0=%rdi) movq % rsi, 0( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 64 ] = buf # asm 1: movq <buf=int64#2,64(<input_0=int64#1) # asm 2: movq <buf=%rsi,64(<input_0=%rdi) movq % rsi, 64( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 128 ] = buf # asm 1: movq <buf=int64#2,128(<input_0=int64#1) # asm 2: movq <buf=%rsi,128(<input_0=%rdi) movq % rsi, 128( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 192 ] = buf # asm 1: movq <buf=int64#2,192(<input_0=int64#1) # asm 2: movq <buf=%rsi,192(<input_0=%rdi) movq % rsi, 192( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 256 ] = buf # asm 1: movq <buf=int64#2,256(<input_0=int64#1) # asm 2: movq <buf=%rsi,256(<input_0=%rdi) movq % rsi, 256( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 320 ] = buf # asm 1: movq <buf=int64#2,320(<input_0=int64#1) # asm 2: movq <buf=%rsi,320(<input_0=%rdi) movq % rsi, 320( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi 
pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 384 ] = buf # asm 1: movq <buf=int64#2,384(<input_0=int64#1) # asm 2: movq <buf=%rsi,384(<input_0=%rdi) movq % rsi, 384( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 448 ] = buf # asm 1: movq <buf=int64#2,448(<input_0=int64#1) # asm 2: movq <buf=%rsi,448(<input_0=%rdi) movq % rsi, 448( % rdi) # qhasm: r0 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 8(<input_0=%rdi),>r0=%xmm6 movddup 8( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 72 ] x2 # asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7 movddup 72( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 136(<input_0=%rdi),>r2=%xmm8 movddup 136( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 200(<input_0=%rdi),>r3=%xmm9 movddup 200( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 264(<input_0=%rdi),>r4=%xmm10 movddup 264( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 328(<input_0=%rdi),>r5=%xmm11 movddup 328( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 392(<input_0=%rdi),>r6=%xmm12 movddup 392( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 456(<input_0=%rdi),>r7=%xmm13 movddup 456( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 
= v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 
vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 8 ] = buf # asm 1: movq <buf=int64#2,8(<input_0=int64#1) # asm 2: movq <buf=%rsi,8(<input_0=%rdi) movq % rsi, 8( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 72 ] = buf # asm 1: movq <buf=int64#2,72(<input_0=int64#1) # asm 2: movq <buf=%rsi,72(<input_0=%rdi) movq % rsi, 72( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 136 ] = buf # asm 1: movq <buf=int64#2,136(<input_0=int64#1) # asm 2: movq <buf=%rsi,136(<input_0=%rdi) movq % rsi, 136( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: 
mem64[ input_0 + 200 ] = buf # asm 1: movq <buf=int64#2,200(<input_0=int64#1) # asm 2: movq <buf=%rsi,200(<input_0=%rdi) movq % rsi, 200( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 264 ] = buf # asm 1: movq <buf=int64#2,264(<input_0=int64#1) # asm 2: movq <buf=%rsi,264(<input_0=%rdi) movq % rsi, 264( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 328 ] = buf # asm 1: movq <buf=int64#2,328(<input_0=int64#1) # asm 2: movq <buf=%rsi,328(<input_0=%rdi) movq % rsi, 328( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 392 ] = buf # asm 1: movq <buf=int64#2,392(<input_0=int64#1) # asm 2: movq <buf=%rsi,392(<input_0=%rdi) movq % rsi, 392( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 456 ] = buf # asm 1: movq <buf=int64#2,456(<input_0=int64#1) # asm 2: movq <buf=%rsi,456(<input_0=%rdi) movq % rsi, 456( % rdi) # qhasm: r0 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 16(<input_0=%rdi),>r0=%xmm6 movddup 16( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 80 ] x2 # asm 1: movddup 80(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 80(<input_0=%rdi),>r1=%xmm7 movddup 80( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 208(<input_0=%rdi),>r3=%xmm9 movddup 208( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 272(<input_0=%rdi),>r4=%xmm10 movddup 272( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 336(<input_0=%rdi),>r5=%xmm11 movddup 336( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 400(<input_0=%rdi),>r6=%xmm12 movddup 400( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 464(<input_0=%rdi),>r7=%xmm13 movddup 464( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor 
<v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 
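# note (added): the vpand/vpslld/vpsrld/vpand quartet just above is one
# 16-bit masked-swap step on the qhasm variables r0 and r2; the two vpor
# instructions that follow recombine the halves:
#   r0' = (r0 & mask2) | (r2 << 16)
#   r2' = (r0 >> 16) | (r2 & mask3)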
# qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 
2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 16 ] = buf # 
asm 1: movq <buf=int64#2,16(<input_0=int64#1) # asm 2: movq <buf=%rsi,16(<input_0=%rdi) movq % rsi, 16( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 80 ] = buf # asm 1: movq <buf=int64#2,80(<input_0=int64#1) # asm 2: movq <buf=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 144 ] = buf # asm 1: movq <buf=int64#2,144(<input_0=int64#1) # asm 2: movq <buf=%rsi,144(<input_0=%rdi) movq % rsi, 144( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 208 ] = buf # asm 1: movq <buf=int64#2,208(<input_0=int64#1) # asm 2: movq <buf=%rsi,208(<input_0=%rdi) movq % rsi, 208( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 272 ] = buf # asm 1: movq <buf=int64#2,272(<input_0=int64#1) # asm 2: movq <buf=%rsi,272(<input_0=%rdi) movq % rsi, 272( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 336 ] = buf # asm 1: movq <buf=int64#2,336(<input_0=int64#1) # asm 2: movq <buf=%rsi,336(<input_0=%rdi) movq % rsi, 336( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 400 ] = buf # asm 1: movq <buf=int64#2,400(<input_0=int64#1) # asm 2: movq <buf=%rsi,400(<input_0=%rdi) movq % rsi, 400( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 464 ] = buf # asm 1: movq <buf=int64#2,464(<input_0=int64#1) # asm 2: movq <buf=%rsi,464(<input_0=%rdi) movq % rsi, 464( % rdi) # qhasm: r0 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 24(<input_0=%rdi),>r0=%xmm6 movddup 24( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 88 ] x2 # asm 1: movddup 88(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 88(<input_0=%rdi),>r1=%xmm7 movddup 88( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 152(<input_0=%rdi),>r2=%xmm8 movddup 152( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 280(<input_0=%rdi),>r4=%xmm10 movddup 280( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 344(<input_0=%rdi),>r5=%xmm11 movddup 344( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 408(<input_0=%rdi),>r6=%xmm12 movddup 408( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 472(<input_0=%rdi),>r7=%xmm13 movddup 472( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # 
# qhasm: v00 = r0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = r1 << 8
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = r2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = r3 << 8
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = r4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = r5 << 8
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = r6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = r7 << 8
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: r6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6
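# After the three swap stages, only the low 64 bits of each xmm register
# are needed: pextrq $0x0 below extracts that quadword and movq writes it
# back to the same stride-64 slots it was loaded from.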
# qhasm: buf = r0[0]
pextrq $0x0,%xmm9,%rsi
# qhasm: mem64[ input_0 + 24 ] = buf
movq %rsi,24(%rdi)
# qhasm: buf = r1[0]
pextrq $0x0,%xmm13,%rsi
# qhasm: mem64[ input_0 + 88 ] = buf
movq %rsi,88(%rdi)
# qhasm: buf = r2[0]
pextrq $0x0,%xmm14,%rsi
# qhasm: mem64[ input_0 + 152 ] = buf
movq %rsi,152(%rdi)
# qhasm: buf = r3[0]
pextrq $0x0,%xmm10,%rsi
# qhasm: mem64[ input_0 + 216 ] = buf
movq %rsi,216(%rdi)
# qhasm: buf = r4[0]
pextrq $0x0,%xmm11,%rsi
# qhasm: mem64[ input_0 + 280 ] = buf
movq %rsi,280(%rdi)
# qhasm: buf = r5[0]
pextrq $0x0,%xmm8,%rsi
# qhasm: mem64[ input_0 + 344 ] = buf
movq %rsi,344(%rdi)
# qhasm: buf = r6[0]
pextrq $0x0,%xmm12,%rsi
# qhasm: mem64[ input_0 + 408 ] = buf
movq %rsi,408(%rdi)
# qhasm: buf = r7[0]
pextrq $0x0,%xmm6,%rsi
# qhasm: mem64[ input_0 + 472 ] = buf
movq %rsi,472(%rdi)
# qhasm: r0 = mem64[ input_0 + 32 ] x2
movddup 32(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 96 ] x2
movddup 96(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 160 ] x2
movddup 160(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 224 ] x2
movddup 224(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 288 ] x2
movddup 288(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 352 ] x2
movddup 352(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 416 ] x2
movddup 416(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 480 ] x2
movddup 480(%rdi),%xmm13
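# The movddup loads above broadcast each 64-bit row into both halves of an
# xmm register; both lanes then compute the same result, and only the low
# lane is ultimately stored back, so movddup serves here simply as a
# quadword load into vector state.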
# qhasm: v00 = r0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = r4 << 32
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = r5 << 32
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = r6 << 32
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: 2x v10 = r7 << 32
vpsllq $32,%xmm13,%xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1,%xmm13,%xmm13
# qhasm: r3 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = r0 & mask2
vpand %xmm2,%xmm14,%xmm13
# qhasm: 4x v10 = r2 << 16
vpslld $16,%xmm11,%xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
vpsrld $16,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = r1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: 4x v10 = r3 << 16
vpslld $16,%xmm12,%xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = r4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: 4x v10 = r6 << 16
vpslld $16,%xmm8,%xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: 4x v10 = r7 << 16
vpslld $16,%xmm9,%xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: r5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = r0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = r1 << 8
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = r2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = r3 << 8
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = r4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = r5 << 8
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = r6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = r7 << 8
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: r6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6
# qhasm: buf = r0[0]
pextrq $0x0,%xmm9,%rsi
# qhasm: mem64[ input_0 + 32 ] = buf
movq %rsi,32(%rdi)
# qhasm: buf = r1[0]
pextrq $0x0,%xmm13,%rsi
# qhasm: mem64[ input_0 + 96 ] = buf
movq %rsi,96(%rdi)
# qhasm: buf = r2[0]
pextrq $0x0,%xmm14,%rsi
# qhasm: mem64[ input_0 + 160 ] = buf
movq %rsi,160(%rdi)
# qhasm: buf = r3[0]
pextrq $0x0,%xmm10,%rsi
# qhasm: mem64[ input_0 + 224 ] = buf
movq %rsi,224(%rdi)
# qhasm: buf = r4[0]
pextrq $0x0,%xmm11,%rsi
# qhasm: mem64[ input_0 + 288 ] = buf
movq %rsi,288(%rdi)
# qhasm: buf = r5[0]
pextrq $0x0,%xmm8,%rsi
# qhasm: mem64[ input_0 + 352 ] = buf
movq %rsi,352(%rdi)
# qhasm: buf = r6[0]
pextrq $0x0,%xmm12,%rsi
# qhasm: mem64[ input_0 + 416 ] = buf
movq %rsi,416(%rdi)
# qhasm: buf = r7[0]
pextrq $0x0,%xmm6,%rsi
# qhasm: mem64[ input_0 + 480 ] = buf
movq %rsi,480(%rdi)
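# The same load / 32-16-8-bit swap / store sequence now repeats for the
# remaining 8-byte column slices at byte offsets 40, 48 and 56; only the
# memory offsets change (and, in the final slice, the register assignment).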
# qhasm: r0 = mem64[ input_0 + 40 ] x2
movddup 40(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 104 ] x2
movddup 104(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 168 ] x2
movddup 168(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 232 ] x2
movddup 232(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 296 ] x2
movddup 296(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 360 ] x2
movddup 360(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 424 ] x2
movddup 424(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 488 ] x2
movddup 488(%rdi),%xmm13
# qhasm: v00 = r0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = r4 << 32
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = r5 << 32
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = r6 << 32
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: 2x v10 = r7 << 32
vpsllq $32,%xmm13,%xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1,%xmm13,%xmm13
# qhasm: r3 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = r0 & mask2
vpand %xmm2,%xmm14,%xmm13
# qhasm: 4x v10 = r2 << 16
vpslld $16,%xmm11,%xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
vpsrld $16,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = r1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: 4x v10 = r3 << 16
vpslld $16,%xmm12,%xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = r4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: 4x v10 = r6 << 16
vpslld $16,%xmm8,%xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: 4x v10 = r7 << 16
vpslld $16,%xmm9,%xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: r5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = r0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = r1 << 8
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = r2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = r3 << 8
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = r4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = r5 << 8
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = r6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = r7 << 8
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: r6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6
# qhasm: buf = r0[0]
pextrq $0x0,%xmm9,%rsi
# qhasm: mem64[ input_0 + 40 ] = buf
movq %rsi,40(%rdi)
# qhasm: buf = r1[0]
pextrq $0x0,%xmm13,%rsi
# qhasm: mem64[ input_0 + 104 ] = buf
movq %rsi,104(%rdi)
# qhasm: buf = r2[0]
pextrq $0x0,%xmm14,%rsi
# qhasm: mem64[ input_0 + 168 ] = buf
movq %rsi,168(%rdi)
# qhasm: buf = r3[0]
pextrq $0x0,%xmm10,%rsi
# qhasm: mem64[ input_0 + 232 ] = buf
movq %rsi,232(%rdi)
# qhasm: buf = r4[0]
pextrq $0x0,%xmm11,%rsi
# qhasm: mem64[ input_0 + 296 ] = buf
movq %rsi,296(%rdi)
# qhasm: buf = r5[0]
pextrq $0x0,%xmm8,%rsi
# qhasm: mem64[ input_0 + 360 ] = buf
movq %rsi,360(%rdi)
# qhasm: buf = r6[0]
pextrq $0x0,%xmm12,%rsi
# qhasm: mem64[ input_0 + 424 ] = buf
movq %rsi,424(%rdi)
# qhasm: buf = r7[0]
pextrq $0x0,%xmm6,%rsi
# qhasm: mem64[ input_0 + 488 ] = buf
movq %rsi,488(%rdi)
# qhasm: r0 = mem64[ input_0 + 48 ] x2
movddup 48(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 112 ] x2
movddup 112(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 176 ] x2
movddup 176(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 240 ] x2
movddup 240(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 304 ] x2
movddup 304(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 368 ] x2
movddup 368(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 432 ] x2
movddup 432(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 496 ] x2
movddup 496(%rdi),%xmm13
# qhasm: v00 = r0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = r4 << 32
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = r5 << 32
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = r6 << 32
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: 2x v10 = r7 << 32
vpsllq $32,%xmm13,%xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1,%xmm13,%xmm13
# qhasm: r3 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = r0 & mask2
vpand %xmm2,%xmm14,%xmm13
# qhasm: 4x v10 = r2 << 16
vpslld $16,%xmm11,%xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
vpsrld $16,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = r1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: 4x v10 = r3 << 16
vpslld $16,%xmm12,%xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = r4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: 4x v10 = r6 << 16
vpslld $16,%xmm8,%xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: 4x v10 = r7 << 16
vpslld $16,%xmm9,%xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: r5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = r0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = r1 << 8
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = r2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = r3 << 8
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = r4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = r5 << 8
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = r6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = r7 << 8
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: r6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6
# qhasm: buf = r0[0]
pextrq $0x0,%xmm9,%rsi
# qhasm: mem64[ input_0 + 48 ] = buf
movq %rsi,48(%rdi)
# qhasm: buf = r1[0]
pextrq $0x0,%xmm13,%rsi
# qhasm: mem64[ input_0 + 112 ] = buf
movq %rsi,112(%rdi)
# qhasm: buf = r2[0]
pextrq $0x0,%xmm14,%rsi
# qhasm: mem64[ input_0 + 176 ] = buf
movq %rsi,176(%rdi)
# qhasm: buf = r3[0]
pextrq $0x0,%xmm10,%rsi
# qhasm: mem64[ input_0 + 240 ] = buf
movq %rsi,240(%rdi)
# qhasm: buf = r4[0]
pextrq $0x0,%xmm11,%rsi
# qhasm: mem64[ input_0 + 304 ] = buf
movq %rsi,304(%rdi)
# qhasm: buf = r5[0]
pextrq $0x0,%xmm8,%rsi
# qhasm: mem64[ input_0 + 368 ] = buf
movq %rsi,368(%rdi)
# qhasm: buf = r6[0]
pextrq $0x0,%xmm12,%rsi
# qhasm: mem64[ input_0 + 432 ] = buf
movq %rsi,432(%rdi)
# qhasm: buf = r7[0]
pextrq $0x0,%xmm6,%rsi
# qhasm: mem64[ input_0 + 496 ] = buf
movq %rsi,496(%rdi)
# qhasm: r0 = mem64[ input_0 + 56 ] x2
movddup 56(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 120 ] x2
movddup 120(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 184 ] x2
movddup 184(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 248 ] x2
movddup 248(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 312 ] x2
movddup 312(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 376 ] x2
movddup 376(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 440 ] x2
movddup 440(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 504 ] x2
movddup 504(%rdi),%xmm13
# qhasm: v00 = r0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = r4 << 32
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = r5 << 32
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = r6 << 32
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
vpand %xmm0,%xmm9,%xmm0
# qhasm: 2x v10 = r7 << 32
vpsllq $32,%xmm13,%xmm12
# qhasm: 2x v01 = r3 unsigned>> 32
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1,%xmm13,%xmm1
# qhasm: r3 = v00 | v10
vpor %xmm12,%xmm0,%xmm0
# qhasm: r7 = v01 | v11
vpor %xmm1,%xmm9,%xmm1
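# In this final slice the allocator starts recycling the registers that
# hold the masks: the r3/r7 exchange above writes its results into %xmm0
# and %xmm1 (previously mask0/mask1), and each remaining mask register is
# similarly reused after its last read. That is safe here because all six
# masks are refilled from the MASK tables immediately after this slice.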
# qhasm: v00 = r0 & mask2
vpand %xmm2,%xmm14,%xmm9
# qhasm: 4x v10 = r2 << 16
vpslld $16,%xmm11,%xmm12
# qhasm: 4x v01 = r0 unsigned>> 16
vpsrld $16,%xmm14,%xmm13
# qhasm: v11 = r2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: r0 = v00 | v10
vpor %xmm12,%xmm9,%xmm9
# qhasm: r2 = v01 | v11
vpor %xmm11,%xmm13,%xmm11
# qhasm: v00 = r1 & mask2
vpand %xmm2,%xmm10,%xmm12
# qhasm: 4x v10 = r3 << 16
vpslld $16,%xmm0,%xmm13
# qhasm: 4x v01 = r1 unsigned>> 16
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3,%xmm0,%xmm0
# qhasm: r1 = v00 | v10
vpor %xmm13,%xmm12,%xmm12
# qhasm: r3 = v01 | v11
vpor %xmm0,%xmm10,%xmm0
# qhasm: v00 = r4 & mask2
vpand %xmm2,%xmm6,%xmm10
# qhasm: 4x v10 = r6 << 16
vpslld $16,%xmm8,%xmm13
# qhasm: 4x v01 = r4 unsigned>> 16
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
vpor %xmm13,%xmm10,%xmm10
# qhasm: r6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2,%xmm7,%xmm2
# qhasm: 4x v10 = r7 << 16
vpslld $16,%xmm1,%xmm8
# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3,%xmm1,%xmm1
# qhasm: r5 = v00 | v10
vpor %xmm8,%xmm2,%xmm2
# qhasm: r7 = v01 | v11
vpor %xmm1,%xmm7,%xmm1
<v10=reg128#9,<v00=reg128#3,>r5=reg128#3 # asm 2: vpor <v10=%xmm8,<v00=%xmm2,>r5=%xmm2 vpor % xmm8, % xmm2, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#13,>v10=reg128#8 # asm 2: vpsllw $8,<r1=%xmm12,>v10=%xmm7 vpsllw $8, % xmm12, % xmm7 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#10,>v01=reg128#9 # asm 2: vpsrlw $8,<r0=%xmm9,>v01=%xmm8 vpsrlw $8, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#1,>v10=reg128#10 # asm 2: vpsllw $8,<r3=%xmm0,>v10=%xmm9 vpsllw $8, % xmm0, % xmm9 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#3,>v10=reg128#12 # asm 2: vpsllw $8,<r5=%xmm2,>v10=%xmm11 vpsllw $8, % xmm2, % xmm11 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#11,>v01=reg128#11 # asm 2: vpsrlw $8,<r4=%xmm10,>v01=%xmm10 vpsrlw $8, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#5 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm4 vpand % xmm4, % xmm6, % xmm4 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#2,>v10=reg128#11 # asm 2: vpsllw $8,<r7=%xmm1,>v10=%xmm10 vpsllw $8, % xmm1, % xmm10 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand 
<mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#11,<v00=reg128#5,>r6=reg128#5 # asm 2: vpor <v10=%xmm10,<v00=%xmm4,>r6=%xmm4 vpor % xmm10, % xmm4, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#4,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm3,>buf=%rsi pextrq $0x0, % xmm3, % rsi # qhasm: mem64[ input_0 + 56 ] = buf # asm 1: movq <buf=int64#2,56(<input_0=int64#1) # asm 2: movq <buf=%rsi,56(<input_0=%rdi) movq % rsi, 56( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#8,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm7,>buf=%rsi pextrq $0x0, % xmm7, % rsi # qhasm: mem64[ input_0 + 120 ] = buf # asm 1: movq <buf=int64#2,120(<input_0=int64#1) # asm 2: movq <buf=%rsi,120(<input_0=%rdi) movq % rsi, 120( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 184 ] = buf # asm 1: movq <buf=int64#2,184(<input_0=int64#1) # asm 2: movq <buf=%rsi,184(<input_0=%rdi) movq % rsi, 184( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#1,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm0,>buf=%rsi pextrq $0x0, % xmm0, % rsi # qhasm: mem64[ input_0 + 248 ] = buf # asm 1: movq <buf=int64#2,248(<input_0=int64#1) # asm 2: movq <buf=%rsi,248(<input_0=%rdi) movq % rsi, 248( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 312 ] = buf # asm 1: movq <buf=int64#2,312(<input_0=int64#1) # asm 2: movq <buf=%rsi,312(<input_0=%rdi) movq % rsi, 312( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#3,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm2,>buf=%rsi pextrq $0x0, % xmm2, % rsi # qhasm: mem64[ input_0 + 376 ] = buf # asm 1: movq <buf=int64#2,376(<input_0=int64#1) # asm 2: movq <buf=%rsi,376(<input_0=%rdi) movq % rsi, 376( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#5,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm4,>buf=%rsi pextrq $0x0, % xmm4, % rsi # qhasm: mem64[ input_0 + 440 ] = buf # asm 1: movq <buf=int64#2,440(<input_0=int64#1) # asm 2: movq <buf=%rsi,440(<input_0=%rdi) movq % rsi, 440( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#2,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm1,>buf=%rsi pextrq $0x0, % xmm1, % rsi # qhasm: mem64[ input_0 + 504 ] = buf # asm 1: movq <buf=int64#2,504(<input_0=int64#1) # asm 2: movq <buf=%rsi,504(<input_0=%rdi) movq % rsi, 504( % rdi) # qhasm: mask0 aligned= mem128[ MASK2_0 ] # asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0 movdqa MASK2_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK2_1 ] # asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1 movdqa MASK2_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK1_0 ] # asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2 movdqa MASK1_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK1_1 ] # asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3 movdqa MASK1_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK0_0 ] # asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4 movdqa MASK0_0( % rip), % 
xmm4 # qhasm: mask5 aligned= mem128[ MASK0_1 ] # asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5 movdqa MASK0_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 8(<input_0=%rdi),>r1=%xmm7 movddup 8( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 16(<input_0=%rdi),>r2=%xmm8 movddup 16( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 24(<input_0=%rdi),>r3=%xmm9 movddup 24( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 32(<input_0=%rdi),>r4=%xmm10 movddup 32( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 40 ] x2 # asm 1: movddup 40(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 40(<input_0=%rdi),>r5=%xmm11 movddup 40( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 48 ] x2 # asm 1: movddup 48(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 48(<input_0=%rdi),>r6=%xmm12 movddup 48( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 56 ] x2 # asm 1: movddup 56(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 56(<input_0=%rdi),>r7=%xmm13 movddup 56( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | 
v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq 
$2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq 
$2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # 
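# The mask4/mask5 rounds here are the final 1-bit interleave of this
# bit-matrix transpose: mask4/mask5 were loaded from MASK0_0/MASK0_1 in
# consts.S (0x5555...5555 and 0xAAAA...AAAA), so each register pair swaps
# its odd and even bits by mask, shift, and OR. A C sketch of one such
# exchange (x, y, x1, y1 are illustrative names, not part of the
# generated code):
#
#   uint64_t x1 = (x & 0x5555555555555555ULL)
#               | ((y & 0x5555555555555555ULL) << 1);
#   uint64_t y1 = ((x & 0xAAAAAAAAAAAAAAAAULL) >> 1)
#               |  (y & 0xAAAAAAAAAAAAAAAAULL);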
qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 0 ] = t0 # asm 1: movdqu <t0=reg128#8,0(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,0(<input_0=%rdi) movdqu % xmm7, 0( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 16 ] = t0 # asm 1: movdqu <t0=reg128#8,16(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,16(<input_0=%rdi) movdqu % xmm7, 16( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 32 ] = t0 # asm 1: movdqu <t0=reg128#8,32(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,32(<input_0=%rdi) movdqu % xmm7, 32( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 48 ] = t0 # asm 1: movdqu <t0=reg128#7,48(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,48(<input_0=%rdi) movdqu % xmm6, 48( % rdi) # qhasm: r0 = mem64[ input_0 + 64 ] x2 # asm 1: movddup 64(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 64(<input_0=%rdi),>r0=%xmm6 movddup 64( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 72 ] x2 # asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7 movddup 72( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 80 ] x2 # asm 1: movddup 80(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 80(<input_0=%rdi),>r2=%xmm8 movddup 80( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 88 ] x2 # asm 1: movddup 88(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 88(<input_0=%rdi),>r3=%xmm9 
movddup 88( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 96 ] x2 # asm 1: movddup 96(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 96(<input_0=%rdi),>r4=%xmm10 movddup 96( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 104 ] x2 # asm 1: movddup 104(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 104(<input_0=%rdi),>r5=%xmm11 movddup 104( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 112 ] x2 # asm 1: movddup 112(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 112(<input_0=%rdi),>r6=%xmm12 movddup 112( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 120 ] x2 # asm 1: movddup 120(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 120(<input_0=%rdi),>r7=%xmm13 movddup 120( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: 
vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 
psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq 
$1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 
1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 64 ] = t0 # asm 1: movdqu <t0=reg128#8,64(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,64(<input_0=%rdi) movdqu % xmm7, 64( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 80 ] = t0 # asm 1: movdqu <t0=reg128#8,80(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,80(<input_0=%rdi) movdqu % xmm7, 80( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 96 ] = t0 # asm 1: movdqu <t0=reg128#8,96(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,96(<input_0=%rdi) movdqu % xmm7, 96( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 112 ] = t0 # asm 1: movdqu <t0=reg128#7,112(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,112(<input_0=%rdi) movdqu % xmm6, 112( % rdi) # qhasm: r0 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 128(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 128(<input_0=%rdi),>r0=%xmm6 movddup 128( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 136(<input_0=%rdi),>r1=%xmm7 movddup 136( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 152(<input_0=%rdi),>r3=%xmm9 movddup 152( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 160(<input_0=%rdi),>r4=%xmm10 movddup 160( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 168 ] x2 # asm 1: movddup 168(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 168(<input_0=%rdi),>r5=%xmm11 movddup 168( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 176 ] x2 # asm 1: movddup 176(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 176(<input_0=%rdi),>r6=%xmm12 movddup 176( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 184 ] x2 # asm 1: movddup 184(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 184(<input_0=%rdi),>r7=%xmm13 movddup 184( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & 
mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor 
<v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: 
vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor 
<v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 128 ] = t0 # asm 1: movdqu <t0=reg128#8,128(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,128(<input_0=%rdi) movdqu % xmm7, 128( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 144 ] = t0 # asm 1: movdqu <t0=reg128#8,144(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,144(<input_0=%rdi) movdqu % xmm7, 144( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 160 ] = t0 # asm 1: movdqu <t0=reg128#8,160(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,160(<input_0=%rdi) movdqu % xmm7, 160( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 176 ] = t0 # asm 1: movdqu <t0=reg128#7,176(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,176(<input_0=%rdi) movdqu % xmm6, 176( % rdi) # qhasm: r0 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 192(<input_0=%rdi),>r0=%xmm6 movddup 192( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 200(<input_0=%rdi),>r1=%xmm7 movddup 200( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 208(<input_0=%rdi),>r2=%xmm8 movddup 208( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 224(<input_0=%rdi),>r4=%xmm10 movddup 224( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 232 ] x2 # asm 1: movddup 232(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 232(<input_0=%rdi),>r5=%xmm11 movddup 232( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 240 ] x2 # asm 1: movddup 240(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 240(<input_0=%rdi),>r6=%xmm12 movddup 240( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 248 ] x2 # asm 1: movddup 248(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 248(<input_0=%rdi),>r7=%xmm13 movddup 248( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % 
# qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm6, %xmm14

# qhasm: v10 = r4 & mask0
vpand %xmm0, %xmm10, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r0 & mask1
vpand %xmm1, %xmm6, %xmm6

# qhasm: v11 = r4 & mask1
vpand %xmm1, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6

# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14

# qhasm: r4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm7, %xmm10

# qhasm: v10 = r5 & mask0
vpand %xmm0, %xmm11, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r1 & mask1
vpand %xmm1, %xmm7, %xmm7

# qhasm: v11 = r5 & mask1
vpand %xmm1, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7

# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10

# qhasm: r5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm8, %xmm11

# qhasm: v10 = r6 & mask0
vpand %xmm0, %xmm12, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r2 & mask1
vpand %xmm1, %xmm8, %xmm8

# qhasm: v11 = r6 & mask1
vpand %xmm1, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8

# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11

# qhasm: r6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm9, %xmm12

# qhasm: v10 = r7 & mask0
vpand %xmm0, %xmm13, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r3 & mask1
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1, %xmm13, %xmm13

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9

# qhasm: r3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm14, %xmm13

# qhasm: v10 = r2 & mask2
vpand %xmm2, %xmm11, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r0 & mask3
vpand %xmm3, %xmm14, %xmm14

# qhasm: v11 = r2 & mask3
vpand %xmm3, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14

# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13

# qhasm: r2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm10, %xmm14

# qhasm: v10 = r3 & mask2
vpand %xmm2, %xmm12, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r1 & mask3
vpand %xmm3, %xmm10, %xmm10

# qhasm: v11 = r3 & mask3
vpand %xmm3, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10

# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm6, %xmm12

# qhasm: v10 = r6 & mask2
vpand %xmm2, %xmm8, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6

# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12

# qhasm: r6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm7, %xmm8

# qhasm: v10 = r7 & mask2
vpand %xmm2, %xmm9, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r5 & mask3
vpand %xmm3, %xmm7, %xmm7

# qhasm: v11 = r7 & mask3
vpand %xmm3, %xmm9, %xmm9

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7

# qhasm: r5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8

# qhasm: r7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm13, %xmm9

# qhasm: v10 = r1 & mask4
vpand %xmm4, %xmm14, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r0 & mask5
vpand %xmm5, %xmm13, %xmm13

# qhasm: v11 = r1 & mask5
vpand %xmm5, %xmm14, %xmm14

# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13

# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9

# qhasm: r1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = r2 & mask4
vpand %xmm4, %xmm11, %xmm14

# qhasm: v10 = r3 & mask4
vpand %xmm4, %xmm10, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r2 & mask5
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11

# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = r4 & mask4
vpand %xmm4, %xmm12, %xmm11

# qhasm: v10 = r5 & mask4
vpand %xmm4, %xmm8, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r4 & mask5
vpand %xmm5, %xmm12, %xmm12

# qhasm: v11 = r5 & mask5
vpand %xmm5, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm12

# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11

# qhasm: r5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = r6 & mask4
vpand %xmm4, %xmm6, %xmm12

# qhasm: v10 = r7 & mask4
vpand %xmm4, %xmm7, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r6 & mask5
vpand %xmm5, %xmm6, %xmm6

# qhasm: v11 = r7 & mask5
vpand %xmm5, %xmm7, %xmm7

# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm6

# qhasm: r6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6

# qhasm: t0 = r0[0]r1[0]
vpunpcklqdq %xmm13, %xmm9, %xmm7

# qhasm: mem128[ input_0 + 192 ] = t0
movdqu %xmm7, 192(%rdi)

# qhasm: t0 = r2[0]r3[0]
vpunpcklqdq %xmm10, %xmm14, %xmm7
# qhasm: mem128[ input_0 + 208 ] = t0
movdqu %xmm7, 208(%rdi)

# qhasm: t0 = r4[0]r5[0]
vpunpcklqdq %xmm8, %xmm11, %xmm7

# qhasm: mem128[ input_0 + 224 ] = t0
movdqu %xmm7, 224(%rdi)

# qhasm: t0 = r6[0]r7[0]
vpunpcklqdq %xmm6, %xmm12, %xmm6

# qhasm: mem128[ input_0 + 240 ] = t0
movdqu %xmm6, 240(%rdi)

# qhasm: r0 = mem64[ input_0 + 256 ] x2
movddup 256(%rdi), %xmm6

# qhasm: r1 = mem64[ input_0 + 264 ] x2
movddup 264(%rdi), %xmm7

# qhasm: r2 = mem64[ input_0 + 272 ] x2
movddup 272(%rdi), %xmm8

# qhasm: r3 = mem64[ input_0 + 280 ] x2
movddup 280(%rdi), %xmm9

# qhasm: r4 = mem64[ input_0 + 288 ] x2
movddup 288(%rdi), %xmm10

# qhasm: r5 = mem64[ input_0 + 296 ] x2
movddup 296(%rdi), %xmm11

# qhasm: r6 = mem64[ input_0 + 304 ] x2
movddup 304(%rdi), %xmm12

# qhasm: r7 = mem64[ input_0 + 312 ] x2
movddup 312(%rdi), %xmm13

# qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm6, %xmm14

# qhasm: v10 = r4 & mask0
vpand %xmm0, %xmm10, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r0 & mask1
vpand %xmm1, %xmm6, %xmm6

# qhasm: v11 = r4 & mask1
vpand %xmm1, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6

# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14

# qhasm: r4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = r5 & mask0
vpand %xmm0, %xmm11, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r1 & mask1
vpand %xmm1, %xmm7, %xmm7

# qhasm: v11 = r5 & mask1
vpand %xmm1, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7

# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10

# qhasm: r5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm8, %xmm11

# qhasm: v10 = r6 & mask0
vpand %xmm0, %xmm12, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r2 & mask1
vpand %xmm1, %xmm8, %xmm8

# qhasm: v11 = r6 & mask1
vpand %xmm1, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8

# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11

# qhasm: r6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm9, %xmm12

# qhasm: v10 = r7 & mask0
vpand %xmm0, %xmm13, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r3 & mask1
vpand %xmm1, %xmm9, %xmm9

# qhasm: v11 = r7 & mask1
vpand %xmm1, %xmm13, %xmm13

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9

# qhasm: r3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = r2 & mask2
vpand %xmm2, %xmm11, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r0 & mask3
vpand %xmm3, %xmm14, %xmm14

# qhasm: v11 = r2 & mask3
vpand %xmm3, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14

# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13

# qhasm: r2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm10, %xmm14

# qhasm: v10 = r3 & mask2
vpand %xmm2, %xmm12, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r1 & mask3
vpand %xmm3, %xmm10, %xmm10

# qhasm: v11 = r3 & mask3
vpand %xmm3, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10

# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm6, %xmm12

# qhasm: v10 = r6 & mask2
vpand %xmm2, %xmm8, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r4 & mask3
vpand %xmm3, %xmm6, %xmm6

# qhasm: v11 = r6 & mask3
vpand %xmm3, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6

# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12

# qhasm: r6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm7, %xmm8

# qhasm: v10 = r7 & mask2
vpand %xmm2, %xmm9, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r5 & mask3
vpand %xmm3, %xmm7, %xmm7

# qhasm: v11 = r7 & mask3
vpand %xmm3, %xmm9, %xmm9

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7

# qhasm: r5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8

# qhasm: r7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm13, %xmm9

# qhasm: v10 = r1 & mask4
vpand %xmm4, %xmm14, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r0 & mask5
vpand %xmm5, %xmm13, %xmm13

# qhasm: v11 = r1 & mask5
vpand %xmm5, %xmm14, %xmm14

# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13

# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9

# qhasm: r1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = r2 & mask4
vpand %xmm4, %xmm11, %xmm14

# qhasm: v10 = r3 & mask4
vpand %xmm4, %xmm10, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r2 & mask5
vpand %xmm5, %xmm11, %xmm11

# qhasm: v11 = r3 & mask5
vpand %xmm5, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11

# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = r4 & mask4
vpand %xmm4, %xmm12, %xmm11

# qhasm: v10 = r5 & mask4
vpand %xmm4, %xmm8, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r4 & mask5
vpand %xmm5, %xmm12, %xmm12

# qhasm: v11 = r5 & mask5
vpand %xmm5, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm12

# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11

# qhasm: r5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = r6 & mask4
vpand %xmm4, %xmm6, %xmm12

# qhasm: v10 = r7 & mask4
vpand %xmm4, %xmm7, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r6 & mask5
vpand %xmm5, %xmm6, %xmm6

# qhasm: v11 = r7 & mask5
vpand %xmm5, %xmm7, %xmm7

# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm6

# qhasm: r6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6

# qhasm: t0 = r0[0]r1[0]
vpunpcklqdq %xmm13, %xmm9, %xmm7

# qhasm: mem128[ input_0 + 256 ] = t0
movdqu %xmm7, 256(%rdi)

# qhasm: t0 = r2[0]r3[0]
vpunpcklqdq %xmm10, %xmm14, %xmm7

# qhasm: mem128[ input_0 + 272 ] = t0
movdqu %xmm7, 272(%rdi)

# qhasm: t0 = r4[0]r5[0]
vpunpcklqdq %xmm8, %xmm11, %xmm7

# qhasm: mem128[ input_0 + 288 ] = t0
movdqu %xmm7, 288(%rdi)

# qhasm: t0 = r6[0]r7[0]
vpunpcklqdq %xmm6, %xmm12, %xmm6
# qhasm: mem128[ input_0 + 304 ] = t0
movdqu %xmm6, 304(%rdi)

# qhasm: r0 = mem64[ input_0 + 320 ] x2
movddup 320(%rdi), %xmm6

# qhasm: r1 = mem64[ input_0 + 328 ] x2
movddup 328(%rdi), %xmm7

# qhasm: r2 = mem64[ input_0 + 336 ] x2
movddup 336(%rdi), %xmm8

# qhasm: r3 = mem64[ input_0 + 344 ] x2
movddup 344(%rdi), %xmm9

# qhasm: r4 = mem64[ input_0 + 352 ] x2
movddup 352(%rdi), %xmm10

# qhasm: r5 = mem64[ input_0 + 360 ] x2
movddup 360(%rdi), %xmm11

# qhasm: r6 = mem64[ input_0 + 368 ] x2
movddup 368(%rdi), %xmm12

# qhasm: r7 = mem64[ input_0 + 376 ] x2
movddup 376(%rdi), %xmm13

# qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm6, %xmm14

# qhasm: v10 = r4 & mask0
vpand %xmm0, %xmm10, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r0 & mask1
vpand %xmm1, %xmm6, %xmm6

# qhasm: v11 = r4 & mask1
vpand %xmm1, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6

# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14

# qhasm: r4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm7, %xmm10

# qhasm: v10 = r5 & mask0
vpand %xmm0, %xmm11, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r1 & mask1
vpand %xmm1, %xmm7, %xmm7

# qhasm: v11 = r5 & mask1
vpand %xmm1, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7
# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10

# qhasm: r5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm8, %xmm11

# qhasm: v10 = r6 & mask0
vpand %xmm0, %xmm12, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r2 & mask1
vpand %xmm1, %xmm8, %xmm8

# qhasm: v11 = r6 & mask1
vpand %xmm1, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8

# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11

# qhasm: r6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm9, %xmm12

# qhasm: v10 = r7 & mask0
vpand %xmm0, %xmm13, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r3 & mask1
vpand %xmm1, %xmm9, %xmm9

# qhasm: v11 = r7 & mask1
vpand %xmm1, %xmm13, %xmm13

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9

# qhasm: r3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm14, %xmm13

# qhasm: v10 = r2 & mask2
vpand %xmm2, %xmm11, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r0 & mask3
vpand %xmm3, %xmm14, %xmm14

# qhasm: v11 = r2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14

# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13

# qhasm: r2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm10, %xmm14

# qhasm: v10 = r3 & mask2
vpand %xmm2, %xmm12, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r1 & mask3
vpand %xmm3, %xmm10, %xmm10

# qhasm: v11 = r3 & mask3
vpand %xmm3, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10

# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm6, %xmm12

# qhasm: v10 = r6 & mask2
vpand %xmm2, %xmm8, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r4 & mask3
vpand %xmm3, %xmm6, %xmm6

# qhasm: v11 = r6 & mask3
vpand %xmm3, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6

# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12

# qhasm: r6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm7, %xmm8

# qhasm: v10 = r7 & mask2
vpand %xmm2, %xmm9, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r5 & mask3
vpand %xmm3, %xmm7, %xmm7

# qhasm: v11 = r7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7

# qhasm: r5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8

# qhasm: r7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm13, %xmm9

# qhasm: v10 = r1 & mask4
vpand %xmm4, %xmm14, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r0 & mask5
vpand %xmm5, %xmm13, %xmm13

# qhasm: v11 = r1 & mask5
vpand %xmm5, %xmm14, %xmm14

# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13

# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9

# qhasm: r1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = r2 & mask4
vpand %xmm4, %xmm11, %xmm14

# qhasm: v10 = r3 & mask4
vpand %xmm4, %xmm10, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r2 & mask5
vpand %xmm5, %xmm11, %xmm11

# qhasm: v11 = r3 & mask5
vpand %xmm5, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11

# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = r4 & mask4
vpand %xmm4, %xmm12, %xmm11

# qhasm: v10 = r5 & mask4
vpand %xmm4, %xmm8, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r4 & mask5
vpand %xmm5, %xmm12, %xmm12

# qhasm: v11 = r5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm12

# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11

# qhasm: r5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = r6 & mask4
vpand %xmm4, %xmm6, %xmm12

# qhasm: v10 = r7 & mask4
vpand %xmm4, %xmm7, %xmm15

# qhasm: 2x v10 <<= 1
psllq $1, %xmm15

# qhasm: v01 = r6 & mask5
vpand %xmm5, %xmm6, %xmm6

# qhasm: v11 = r7 & mask5
vpand %xmm5, %xmm7, %xmm7

# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm6

# qhasm: r6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6

# qhasm: t0 = r0[0]r1[0]
vpunpcklqdq %xmm13, %xmm9, %xmm7

# qhasm: mem128[ input_0 + 320 ] = t0
movdqu %xmm7, 320(%rdi)

# qhasm: t0 = r2[0]r3[0]
vpunpcklqdq %xmm10, %xmm14, %xmm7

# qhasm: mem128[ input_0 + 336 ] = t0
movdqu %xmm7, 336(%rdi)

# qhasm: t0 = r4[0]r5[0]
vpunpcklqdq %xmm8, %xmm11, %xmm7

# qhasm: mem128[ input_0 + 352 ] = t0
movdqu %xmm7, 352(%rdi)

# qhasm: t0 = r6[0]r7[0]
vpunpcklqdq %xmm6, %xmm12, %xmm6

# qhasm: mem128[ input_0 + 368 ] = t0
movdqu %xmm6, 368(%rdi)
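# The same load / three-stage interleave / pack-and-store pattern
# repeats below for the next group of eight 64-bit rows, with only
# the offsets advancing (384..440).  As above, vpunpcklqdq packs the
# low 64-bit lane of each result pair before the 128-bit stores.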
# qhasm: r0 = mem64[ input_0 + 384 ] x2
movddup 384(%rdi), %xmm6

# qhasm: r1 = mem64[ input_0 + 392 ] x2
movddup 392(%rdi), %xmm7

# qhasm: r2 = mem64[ input_0 + 400 ] x2
movddup 400(%rdi), %xmm8

# qhasm: r3 = mem64[ input_0 + 408 ] x2
movddup 408(%rdi), %xmm9

# qhasm: r4 = mem64[ input_0 + 416 ] x2
movddup 416(%rdi), %xmm10

# qhasm: r5 = mem64[ input_0 + 424 ] x2
movddup 424(%rdi), %xmm11

# qhasm: r6 = mem64[ input_0 + 432 ] x2
movddup 432(%rdi), %xmm12

# qhasm: r7 = mem64[ input_0 + 440 ] x2
movddup 440(%rdi), %xmm13

# qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm6, %xmm14

# qhasm: v10 = r4 & mask0
vpand %xmm0, %xmm10, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r0 & mask1
vpand %xmm1, %xmm6, %xmm6

# qhasm: v11 = r4 & mask1
vpand %xmm1, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6

# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14

# qhasm: r4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm7, %xmm10

# qhasm: v10 = r5 & mask0
vpand %xmm0, %xmm11, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r1 & mask1
vpand %xmm1, %xmm7, %xmm7

# qhasm: v11 = r5 & mask1
vpand %xmm1, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7

# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10

# qhasm: r5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm8, %xmm11

# qhasm: v10 = r6 & mask0
vpand %xmm0, %xmm12, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = r2 & mask1
vpand %xmm1, %xmm8, %xmm8

# qhasm: v11 = r6 & mask1
vpand %xmm1, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8

# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11

# qhasm: r6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm9, %xmm12

# qhasm: v10 = r7 & mask0
vpand %xmm0, %xmm13, %xmm15

# qhasm: 2x v10 <<= 4
psllq $4, %xmm15

# qhasm: v01 = r3 & mask1
vpand %xmm1, %xmm9, %xmm9

# qhasm: v11 = r7 & mask1
vpand %xmm1, %xmm13, %xmm13

# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9

# qhasm: r3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm14, %xmm13

# qhasm: v10 = r2 & mask2
vpand %xmm2, %xmm11, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r0 & mask3
vpand %xmm3, %xmm14, %xmm14

# qhasm: v11 = r2 & mask3
vpand %xmm3, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14

# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13

# qhasm: r2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm10, %xmm14

# qhasm: v10 = r3 & mask2
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r1 & mask3
vpand %xmm3, %xmm10, %xmm10

# qhasm: v11 = r3 & mask3
vpand %xmm3, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10

# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm6, %xmm12

# qhasm: v10 = r6 & mask2
vpand %xmm2, %xmm8, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r4 & mask3
vpand %xmm3, %xmm6, %xmm6

# qhasm: v11 = r6 & mask3
vpand %xmm3, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6

# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12

# qhasm: r6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm7, %xmm8

# qhasm: v10 = r7 & mask2
vpand %xmm2, %xmm9, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15

# qhasm: v01 = r5 & mask3
vpand %xmm3, %xmm7, %xmm7

# qhasm: v11 = r7 & mask3
vpand %xmm3, %xmm9, %xmm9

# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7

# qhasm: r5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8

# qhasm: r7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm13, %xmm9

# qhasm: v10 = r1 & mask4
vpand %xmm4, %xmm14, %xmm15
$1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % 
xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 384 ] = t0 # asm 1: movdqu <t0=reg128#8,384(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,384(<input_0=%rdi) movdqu % xmm7, 384( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 400 ] = t0 # asm 1: movdqu <t0=reg128#8,400(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,400(<input_0=%rdi) movdqu % xmm7, 400( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 416 ] = t0 # asm 1: movdqu <t0=reg128#8,416(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,416(<input_0=%rdi) movdqu % xmm7, 416( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 432 ] = t0 # asm 1: movdqu <t0=reg128#7,432(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,432(<input_0=%rdi) movdqu % xmm6, 432( % rdi) # qhasm: r0 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 448(<input_0=%rdi),>r0=%xmm6 movddup 448( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 456(<input_0=%rdi),>r1=%xmm7 movddup 456( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 464(<input_0=%rdi),>r2=%xmm8 movddup 464( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 472(<input_0=%rdi),>r3=%xmm9 movddup 472( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 480(<input_0=%rdi),>r4=%xmm10 movddup 480( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 488 ] x2 # asm 1: movddup 488(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 488(<input_0=%rdi),>r5=%xmm11 movddup 488( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 496 ] x2 # asm 1: movddup 496(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 496(<input_0=%rdi),>r6=%xmm12 movddup 496( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 504 ] x2 # asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13 movddup 504( % rdi), % xmm13 # 
qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % 
xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>r3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>r3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor 
<v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<r7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>r5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>r5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<r1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<r0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % 
xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<r3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<r5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<r4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<r7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>r6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>r6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % 
xmm1 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#8,<r0=reg128#4,>t0=reg128#4 # asm 2: vpunpcklqdq <r1=%xmm7,<r0=%xmm3,>t0=%xmm3 vpunpcklqdq % xmm7, % xmm3, % xmm3 # qhasm: mem128[ input_0 + 448 ] = t0 # asm 1: movdqu <t0=reg128#4,448(<input_0=int64#1) # asm 2: movdqu <t0=%xmm3,448(<input_0=%rdi) movdqu % xmm3, 448( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#1,<r2=reg128#9,>t0=reg128#1 # asm 2: vpunpcklqdq <r3=%xmm0,<r2=%xmm8,>t0=%xmm0 vpunpcklqdq % xmm0, % xmm8, % xmm0 # qhasm: mem128[ input_0 + 464 ] = t0 # asm 1: movdqu <t0=reg128#1,464(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,464(<input_0=%rdi) movdqu % xmm0, 464( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#3,<r4=reg128#10,>t0=reg128#1 # asm 2: vpunpcklqdq <r5=%xmm2,<r4=%xmm9,>t0=%xmm0 vpunpcklqdq % xmm2, % xmm9, % xmm0 # qhasm: mem128[ input_0 + 480 ] = t0 # asm 1: movdqu <t0=reg128#1,480(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,480(<input_0=%rdi) movdqu % xmm0, 480( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#2,<r6=reg128#5,>t0=reg128#1 # asm 2: vpunpcklqdq <r7=%xmm1,<r6=%xmm4,>t0=%xmm0 vpunpcklqdq % xmm1, % xmm4, % xmm0 # qhasm: mem128[ input_0 + 496 ] = t0 # asm 1: movdqu <t0=reg128#1,496(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,496(<input_0=%rdi) movdqu % xmm0, 496( % rdi) # qhasm: return add % r11, % rsp ret
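The unrolled AVX2 sequence above is one level of a constant-time bit-matrix transpose: for each shift s in {4, 2, 1}, pairs of registers are recombined through complementary masks using only vpand/psllq/psrlq/vpor, so there are no data-dependent branches or table lookups. Below is a minimal scalar sketch of one such masked-swap step on plain 64-bit words; delta_swap_step, lo, hi, mask and s are illustrative names, not identifiers from the source.

#include <stdint.h>

/* One masked-swap step: mask selects the low s-bit half of every
 * 2*s-bit group (0x0F0F... for s = 4, 0x3333... for s = 2,
 * 0x5555... for s = 1); its complement plays the role of the
 * mask1/mask3/mask5 registers in the assembly. */
static void delta_swap_step(uint64_t *lo, uint64_t *hi,
                            uint64_t mask, unsigned s)
{
    uint64_t v00 = *lo & mask;          /* vpand: bits of lo that stay  */
    uint64_t v10 = (*hi & mask) << s;   /* vpand + psllq: hi -> lo bits */
    uint64_t v01 = (*lo & ~mask) >> s;  /* vpand + psrlq: lo -> hi bits */
    uint64_t v11 = *hi & ~mask;         /* vpand: bits of hi that stay  */
    *lo = v00 | v10;                    /* vpor */
    *hi = v01 | v11;                    /* vpor */
}

The assembly applies exactly this step to both 64-bit lanes of an XMM register at once, then interleaves the finished quadwords with vpunpcklqdq before storing them back with movdqu.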
mktmansour/MKT-KSA-Geolocation-Security
2,712
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128f/avx2/consts.S
#include "namespace.h" #if defined(__APPLE__) #define ASM_HIDDEN .private_extern #else #define ASM_HIDDEN .hidden #endif #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) .data ASM_HIDDEN MASK0_0 ASM_HIDDEN MASK0_1 ASM_HIDDEN MASK1_0 ASM_HIDDEN MASK1_1 ASM_HIDDEN MASK2_0 ASM_HIDDEN MASK2_1 ASM_HIDDEN MASK3_0 ASM_HIDDEN MASK3_1 ASM_HIDDEN MASK4_0 ASM_HIDDEN MASK4_1 ASM_HIDDEN MASK5_0 ASM_HIDDEN MASK5_1 .globl MASK0_0 .globl MASK0_1 .globl MASK1_0 .globl MASK1_1 .globl MASK2_0 .globl MASK2_1 .globl MASK3_0 .globl MASK3_1 .globl MASK4_0 .globl MASK4_1 .globl MASK5_0 .globl MASK5_1 .p2align 5 MASK0_0: .quad 0x5555555555555555, 0x5555555555555555, 0x5555555555555555, 0x5555555555555555 MASK0_1: .quad 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA MASK1_0: .quad 0x3333333333333333, 0x3333333333333333, 0x3333333333333333, 0x3333333333333333 MASK1_1: .quad 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC MASK2_0: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F MASK2_1: .quad 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0 MASK3_0: .quad 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF MASK3_1: .quad 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00 MASK4_0: .quad 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF MASK4_1: .quad 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000 MASK5_0: .quad 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF MASK5_1: .quad 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000
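These MASKn_0 / MASKn_1 pairs are the standard complementary interleave masks at granularities 1, 2, 4, 8, 16 and 32 bits, each value replicated across four 64-bit lanes so a single 256-bit vpand can apply it. A scalar sketch of the same table follows; MASK_LO and mask_hi are illustrative names, and the property MASKn_1 == ~MASKn_0 can be checked against every row above.

#include <stdint.h>

/* MASKn_0 keeps the low (1 << n)-bit half of every (2 << n)-bit
 * group; MASKn_1 is its bitwise complement. */
static const uint64_t MASK_LO[6] = {
    0x5555555555555555ULL, /* MASK0_0: granularity 1  */
    0x3333333333333333ULL, /* MASK1_0: granularity 2  */
    0x0F0F0F0F0F0F0F0FULL, /* MASK2_0: granularity 4  */
    0x00FF00FF00FF00FFULL, /* MASK3_0: granularity 8  */
    0x0000FFFF0000FFFFULL, /* MASK4_0: granularity 16 */
    0x00000000FFFFFFFFULL, /* MASK5_0: granularity 32 */
};

static inline uint64_t mask_hi(int n) { return ~MASK_LO[n]; }

Transpose kernels such as the one earlier in this dump pick one mask pair per pass and swap the masked halves between two registers.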
mktmansour/MKT-KSA-Geolocation-Security
14,915
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128f/avx2/update_asm.S
#include "namespace.h" #define update_asm CRYPTO_NAMESPACE(update_asm) #define _update_asm _CRYPTO_NAMESPACE(update_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 s0 # qhasm: int64 s1 # qhasm: int64 s2 # qhasm: enter update_asm .p2align 5 .global _update_asm .global update_asm _update_asm: update_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: s2 = input_1 # asm 1: mov <input_1=int64#2,>s2=int64#2 # asm 2: mov <input_1=%rsi,>s2=%rsi mov % rsi, % rsi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ 
input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 
0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd 
$1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq 
<s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: return add % r11, % rsp ret
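update_asm above is a fully unrolled, constant-time shift-register update: for each column (columns are input_2 bytes apart, with the base pointer in input_0), the 128-bit value is shifted right by one bit with shrd, and successive bits of the 64-bit word in input_1 are fed into the vacated top bit through the s2 chain (shrd $1, s2, s1 followed by shr $1, s2). The following is a minimal C sketch under those assumptions; update, buf, in, stride and n are illustrative names, and the assembly hard-codes the iteration count instead of looping.

#include <stdint.h>
#include <string.h>

static void update(unsigned char *buf, uint64_t in, long stride, int n)
{
    for (int j = 0; j < n; j++) {
        unsigned char *p = buf + (long)j * stride;
        uint64_t s0, s1;
        memcpy(&s0, p, 8);                  /* movq 0(%rdi), %rcx */
        memcpy(&s1, p + 8, 8);              /* movq 8(%rdi), %r8  */
        s0 = (s0 >> 1) | (s1 << 63);        /* shrd $1, %r8, %rcx */
        s1 = (s1 >> 1) | ((in & 1) << 63);  /* shrd $1, %rsi, %r8 */
        in >>= 1;                           /* shr  $1, %rsi      */
        memcpy(p, &s0, 8);
        memcpy(p + 8, &s1, 8);
    }
}

memcpy is used instead of direct uint64_t dereferences so the sketch stays well-defined for unaligned columns, matching the byte-granular movq loads and stores of the original.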
mktmansour/MKT-KSA-Geolocation-Security
53,565
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128f/avx2/vec128_mul_asm.S
#include "namespace.h" #define vec128_mul_asm CRYPTO_NAMESPACE(vec128_mul_asm) #define _vec128_mul_asm _CRYPTO_NAMESPACE(vec128_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 b6 # qhasm: reg256 b7 # qhasm: reg256 b8 # qhasm: reg256 b9 # qhasm: reg256 b10 # qhasm: reg256 b11 # qhasm: reg256 b12 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: reg128 h0 # qhasm: reg128 h1 # qhasm: reg128 h2 # qhasm: reg128 h3 # qhasm: reg128 h4 # qhasm: reg128 h5 # qhasm: reg128 h6 # qhasm: reg128 h7 # qhasm: reg128 h8 # qhasm: reg128 h9 # qhasm: reg128 h10 # qhasm: reg128 h11 # qhasm: reg128 h12 # qhasm: reg128 h13 # qhasm: reg128 h14 # qhasm: reg128 h15 # qhasm: reg128 h16 # qhasm: reg128 h17 # qhasm: reg128 h18 # qhasm: reg128 h19 # qhasm: reg128 h20 # qhasm: reg128 h21 # qhasm: reg128 h22 # qhasm: reg128 h23 # qhasm: reg128 h24 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: enter vec128_mul_asm .p2align 5 .global _vec128_mul_asm .global vec128_mul_asm _vec128_mul_asm: vec128_mul_asm: mov % rsp, % r11 and $31, % r11 add $608, % r11 sub % r11, % rsp # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#5 # asm 2: leaq <buf=0(%rsp),>ptr=%r8 leaq 0( % rsp), % r8 # qhasm: tmp = input_3 # asm 1: mov <input_3=int64#4,>tmp=int64#6 # asm 2: mov <input_3=%rcx,>tmp=%r9 mov % rcx, % r9 # qhasm: tmp *= 12 # asm 1: imulq $12,<tmp=int64#6,>tmp=int64#6 # asm 2: imulq $12,<tmp=%r9,>tmp=%r9 imulq $12, % r9, % r9 # qhasm: input_2 += tmp # asm 1: add <tmp=int64#6,<input_2=int64#3 # asm 2: add <tmp=%r9,<input_2=%rdx add % r9, % rdx # qhasm: b12 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b12=reg256#1 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b12=%ymm0 vbroadcasti128 0( % rdx), % ymm0 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: a6 = a6 ^ a6 # asm 1: vpxor <a6=reg256#2,<a6=reg256#2,>a6=reg256#2 # asm 2: vpxor <a6=%ymm1,<a6=%ymm1,>a6=%ymm1 vpxor % ymm1, % ymm1, % ymm1 # qhasm: a6[0] = mem128[ input_1 + 96 ] # asm 1: vinsertf128 $0x0,96(<input_1=int64#2),<a6=reg256#2,<a6=reg256#2 # asm 2: vinsertf128 $0x0,96(<input_1=%rsi),<a6=%ymm1,<a6=%ymm1 vinsertf128 $0x0, 96( % rsi), % ymm1, % ymm1 # qhasm: r18 = b12 & a6 # asm 1: vpand <b12=reg256#1,<a6=reg256#2,>r18=reg256#3 # asm 2: vpand <b12=%ymm0,<a6=%ymm1,>r18=%ymm2 vpand % ymm0, % ymm1, % ymm2 # qhasm: mem256[ ptr + 576 ] = r18 # asm 1: vmovupd <r18=reg256#3,576(<ptr=int64#5) # asm 2: 
vmovupd <r18=%ymm2,576(<ptr=%r8) vmovupd % ymm2, 576( % r8) # qhasm: a5[0] = mem128[ input_1 + 80 ] # asm 1: vinsertf128 $0x0,80(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x0,80(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x0, 80( % rsi), % ymm2, % ymm2 # qhasm: a5[1] = mem128[ input_1 + 192 ] # asm 1: vinsertf128 $0x1,192(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x1,192(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x1, 192( % rsi), % ymm2, % ymm2 # qhasm: r17 = b12 & a5 # asm 1: vpand <b12=reg256#1,<a5=reg256#3,>r17=reg256#4 # asm 2: vpand <b12=%ymm0,<a5=%ymm2,>r17=%ymm3 vpand % ymm0, % ymm2, % ymm3 # qhasm: a4[0] = mem128[ input_1 + 64 ] # asm 1: vinsertf128 $0x0,64(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x0,64(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x0, 64( % rsi), % ymm4, % ymm4 # qhasm: a4[1] = mem128[ input_1 + 176 ] # asm 1: vinsertf128 $0x1,176(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x1,176(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x1, 176( % rsi), % ymm4, % ymm4 # qhasm: r16 = b12 & a4 # asm 1: vpand <b12=reg256#1,<a4=reg256#5,>r16=reg256#6 # asm 2: vpand <b12=%ymm0,<a4=%ymm4,>r16=%ymm5 vpand % ymm0, % ymm4, % ymm5 # qhasm: a3[0] = mem128[ input_1 + 48 ] # asm 1: vinsertf128 $0x0,48(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x0,48(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x0, 48( % rsi), % ymm6, % ymm6 # qhasm: a3[1] = mem128[ input_1 + 160 ] # asm 1: vinsertf128 $0x1,160(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x1,160(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x1, 160( % rsi), % ymm6, % ymm6 # qhasm: r15 = b12 & a3 # asm 1: vpand <b12=reg256#1,<a3=reg256#7,>r15=reg256#8 # asm 2: vpand <b12=%ymm0,<a3=%ymm6,>r15=%ymm7 vpand % ymm0, % ymm6, % ymm7 # qhasm: a2[0] = mem128[ input_1 + 32 ] # asm 1: vinsertf128 $0x0,32(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x0,32(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x0, 32( % rsi), % ymm8, % ymm8 # qhasm: a2[1] = mem128[ input_1 + 144 ] # asm 1: vinsertf128 $0x1,144(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x1,144(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x1, 144( % rsi), % ymm8, % ymm8 # qhasm: r14 = b12 & a2 # asm 1: vpand <b12=reg256#1,<a2=reg256#9,>r14=reg256#10 # asm 2: vpand <b12=%ymm0,<a2=%ymm8,>r14=%ymm9 vpand % ymm0, % ymm8, % ymm9 # qhasm: a1[0] = mem128[ input_1 + 16 ] # asm 1: vinsertf128 $0x0,16(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x0,16(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x0, 16( % rsi), % ymm10, % ymm10 # qhasm: a1[1] = mem128[ input_1 + 128 ] # asm 1: vinsertf128 $0x1,128(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x1,128(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x1, 128( % rsi), % ymm10, % ymm10 # qhasm: r13 = b12 & a1 # asm 1: vpand <b12=reg256#1,<a1=reg256#11,>r13=reg256#12 # asm 2: vpand <b12=%ymm0,<a1=%ymm10,>r13=%ymm11 vpand % ymm0, % ymm10, % ymm11 # qhasm: a0[0] = mem128[ input_1 + 0 ] # asm 1: vinsertf128 $0x0,0(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x0,0(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x0, 0( % rsi), % ymm12, % ymm12 # qhasm: a0[1] = mem128[ input_1 + 112 ] # asm 1: vinsertf128 $0x1,112(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x1,112(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x1, 112( % rsi), % ymm12, % ymm12 
# qhasm: r12 = b12 & a0 # asm 1: vpand <b12=reg256#1,<a0=reg256#13,>r12=reg256#1 # asm 2: vpand <b12=%ymm0,<a0=%ymm12,>r12=%ymm0 vpand % ymm0, % ymm12, % ymm0 # qhasm: b11 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b11=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b11=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b11 & a6 # asm 1: vpand <b11=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b11=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#4,<r17=reg256#4 # asm 2: vpxor <r=%ymm14,<r17=%ymm3,<r17=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 544 ] = r17 # asm 1: vmovupd <r17=reg256#4,544(<ptr=int64#5) # asm 2: vmovupd <r17=%ymm3,544(<ptr=%r8) vmovupd % ymm3, 544( % r8) # qhasm: r = b11 & a5 # asm 1: vpand <b11=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#4,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm3,<r16=%ymm5,<r16=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b11 & a4 # asm 1: vpand <b11=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#4,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm3,<r15=%ymm7,<r15=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b11 & a3 # asm 1: vpand <b11=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#4,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm3,<r14=%ymm9,<r14=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b11 & a2 # asm 1: vpand <b11=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#4,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm3,<r13=%ymm11,<r13=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b11 & a1 # asm 1: vpand <b11=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#4,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm3,<r12=%ymm0,<r12=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r11 = b11 & a0 # asm 1: vpand <b11=reg256#14,<a0=reg256#13,>r11=reg256#4 # asm 2: vpand <b11=%ymm13,<a0=%ymm12,>r11=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b10 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b10=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b10=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b10 & a6 # asm 1: vpand <b10=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b10=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm14,<r16=%ymm5,<r16=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 512 ] = r16 # asm 1: vmovupd <r16=reg256#6,512(<ptr=int64#5) # asm 2: vmovupd <r16=%ymm5,512(<ptr=%r8) vmovupd % ymm5, 512( % r8) # qhasm: r = b10 & a5 # asm 1: vpand <b10=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # 
qhasm: r15 ^= r # asm 1: vpxor <r=reg256#6,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm5,<r15=%ymm7,<r15=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b10 & a4 # asm 1: vpand <b10=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#6,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm5,<r14=%ymm9,<r14=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b10 & a3 # asm 1: vpand <b10=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#6,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm5,<r13=%ymm11,<r13=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b10 & a2 # asm 1: vpand <b10=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#6,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm5,<r12=%ymm0,<r12=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b10 & a1 # asm 1: vpand <b10=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#6,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm5,<r11=%ymm3,<r11=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r10 = b10 & a0 # asm 1: vpand <b10=reg256#14,<a0=reg256#13,>r10=reg256#6 # asm 2: vpand <b10=%ymm13,<a0=%ymm12,>r10=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b9 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b9=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b9=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b9 & a6 # asm 1: vpand <b9=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b9=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm14,<r15=%ymm7,<r15=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 480 ] = r15 # asm 1: vmovupd <r15=reg256#8,480(<ptr=int64#5) # asm 2: vmovupd <r15=%ymm7,480(<ptr=%r8) vmovupd % ymm7, 480( % r8) # qhasm: r = b9 & a5 # asm 1: vpand <b9=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#8,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm7,<r14=%ymm9,<r14=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b9 & a4 # asm 1: vpand <b9=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#8,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm7,<r13=%ymm11,<r13=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b9 & a3 # asm 1: vpand <b9=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#8,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm7,<r12=%ymm0,<r12=%ymm0 vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b9 & a2 # asm 1: vpand <b9=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#8,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm7,<r11=%ymm3,<r11=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b9 & a1 # asm 1: vpand 
<b9=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#8,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm7,<r10=%ymm5,<r10=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r9 = b9 & a0 # asm 1: vpand <b9=reg256#14,<a0=reg256#13,>r9=reg256#8 # asm 2: vpand <b9=%ymm13,<a0=%ymm12,>r9=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b8 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b8=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b8=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b8 & a6 # asm 1: vpand <b8=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b8=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm14,<r14=%ymm9,<r14=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 448 ] = r14 # asm 1: vmovupd <r14=reg256#10,448(<ptr=int64#5) # asm 2: vmovupd <r14=%ymm9,448(<ptr=%r8) vmovupd % ymm9, 448( % r8) # qhasm: r = b8 & a5 # asm 1: vpand <b8=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#10,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm9,<r13=%ymm11,<r13=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b8 & a4 # asm 1: vpand <b8=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#10,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm9,<r12=%ymm0,<r12=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b8 & a3 # asm 1: vpand <b8=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#10,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm9,<r11=%ymm3,<r11=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b8 & a2 # asm 1: vpand <b8=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#10,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm9,<r10=%ymm5,<r10=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b8 & a1 # asm 1: vpand <b8=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#10,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm9,<r9=%ymm7,<r9=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r8 = b8 & a0 # asm 1: vpand <b8=reg256#14,<a0=reg256#13,>r8=reg256#10 # asm 2: vpand <b8=%ymm13,<a0=%ymm12,>r8=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b7 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b7=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b7=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b7 & a6 # asm 1: vpand <b7=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b7=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm14,<r13=%ymm11,<r13=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 416 ] = r13 # asm 1: vmovupd 
<r13=reg256#12,416(<ptr=int64#5) # asm 2: vmovupd <r13=%ymm11,416(<ptr=%r8) vmovupd % ymm11, 416( % r8) # qhasm: r = b7 & a5 # asm 1: vpand <b7=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#12,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm11,<r12=%ymm0,<r12=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b7 & a4 # asm 1: vpand <b7=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#12,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm11,<r11=%ymm3,<r11=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b7 & a3 # asm 1: vpand <b7=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#12,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm11,<r10=%ymm5,<r10=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b7 & a2 # asm 1: vpand <b7=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#12,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm11,<r9=%ymm7,<r9=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b7 & a1 # asm 1: vpand <b7=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#12,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm11,<r8=%ymm9,<r8=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r7 = b7 & a0 # asm 1: vpand <b7=reg256#14,<a0=reg256#13,>r7=reg256#12 # asm 2: vpand <b7=%ymm13,<a0=%ymm12,>r7=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b6 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b6=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b6=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b6 & a6 # asm 1: vpand <b6=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b6=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm14,<r12=%ymm0,<r12=%ymm0 vpxor % ymm14, % ymm0, % ymm0 # qhasm: mem256[ ptr + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<ptr=int64#5) # asm 2: vmovupd <r12=%ymm0,384(<ptr=%r8) vmovupd % ymm0, 384( % r8) # qhasm: r = b6 & a5 # asm 1: vpand <b6=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm0,<r11=%ymm3,<r11=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b6 & a4 # asm 1: vpand <b6=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm0,<r10=%ymm5,<r10=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b6 & a3 # asm 1: vpand <b6=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a3=%ymm6,>r=%ymm0 vpand % ymm13, % ymm6, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm0,<r9=%ymm7,<r9=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b6 & a2 # asm 1: vpand <b6=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand 
<b6=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm0,<r8=%ymm9,<r8=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b6 & a1 # asm 1: vpand <b6=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm0,<r7=%ymm11,<r7=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r6 = b6 & a0 # asm 1: vpand <b6=reg256#14,<a0=reg256#13,>r6=reg256#1 # asm 2: vpand <b6=%ymm13,<a0=%ymm12,>r6=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: b5 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b5=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b5=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b5 & a6 # asm 1: vpand <b5=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b5=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm14,<r11=%ymm3,<r11=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 352 ] = r11 # asm 1: vmovupd <r11=reg256#4,352(<ptr=int64#5) # asm 2: vmovupd <r11=%ymm3,352(<ptr=%r8) vmovupd % ymm3, 352( % r8) # qhasm: r = b5 & a5 # asm 1: vpand <b5=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#4,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm3,<r10=%ymm5,<r10=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b5 & a4 # asm 1: vpand <b5=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#4,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm3,<r9=%ymm7,<r9=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b5 & a3 # asm 1: vpand <b5=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#4,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm3,<r8=%ymm9,<r8=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b5 & a2 # asm 1: vpand <b5=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#4,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm3,<r7=%ymm11,<r7=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b5 & a1 # asm 1: vpand <b5=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#4,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm3,<r6=%ymm0,<r6=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r5 = b5 & a0 # asm 1: vpand <b5=reg256#14,<a0=reg256#13,>r5=reg256#4 # asm 2: vpand <b5=%ymm13,<a0=%ymm12,>r5=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b4 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b4=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b4=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b4 & a6 # asm 1: vpand <b4=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b4=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: 
r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm14,<r10=%ymm5,<r10=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#6,320(<ptr=int64#5) # asm 2: vmovupd <r10=%ymm5,320(<ptr=%r8) vmovupd % ymm5, 320( % r8) # qhasm: r = b4 & a5 # asm 1: vpand <b4=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#6,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm5,<r9=%ymm7,<r9=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b4 & a4 # asm 1: vpand <b4=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#6,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm5,<r8=%ymm9,<r8=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b4 & a3 # asm 1: vpand <b4=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#6,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm5,<r7=%ymm11,<r7=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b4 & a2 # asm 1: vpand <b4=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#6,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm5,<r6=%ymm0,<r6=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b4 & a1 # asm 1: vpand <b4=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#6,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm5,<r5=%ymm3,<r5=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r4 = b4 & a0 # asm 1: vpand <b4=reg256#14,<a0=reg256#13,>r4=reg256#6 # asm 2: vpand <b4=%ymm13,<a0=%ymm12,>r4=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b3 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b3=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b3=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b3 & a6 # asm 1: vpand <b3=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b3=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm14,<r9=%ymm7,<r9=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#8,288(<ptr=int64#5) # asm 2: vmovupd <r9=%ymm7,288(<ptr=%r8) vmovupd % ymm7, 288( % r8) # qhasm: r = b3 & a5 # asm 1: vpand <b3=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#8,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm7,<r8=%ymm9,<r8=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b3 & a4 # asm 1: vpand <b3=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#8,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm7,<r7=%ymm11,<r7=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b3 & a3 # asm 1: vpand <b3=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#8,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm7,<r6=%ymm0,<r6=%ymm0 
vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b3 & a2 # asm 1: vpand <b3=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#8,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm7,<r5=%ymm3,<r5=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b3 & a1 # asm 1: vpand <b3=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#8,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm7,<r4=%ymm5,<r4=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r3 = b3 & a0 # asm 1: vpand <b3=reg256#14,<a0=reg256#13,>r3=reg256#8 # asm 2: vpand <b3=%ymm13,<a0=%ymm12,>r3=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b2 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b2=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b2=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b2 & a6 # asm 1: vpand <b2=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b2=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm14,<r8=%ymm9,<r8=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#10,256(<ptr=int64#5) # asm 2: vmovupd <r8=%ymm9,256(<ptr=%r8) vmovupd % ymm9, 256( % r8) # qhasm: r = b2 & a5 # asm 1: vpand <b2=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#10,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm9,<r7=%ymm11,<r7=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b2 & a4 # asm 1: vpand <b2=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#10,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm9,<r6=%ymm0,<r6=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b2 & a3 # asm 1: vpand <b2=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#10,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm9,<r5=%ymm3,<r5=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b2 & a2 # asm 1: vpand <b2=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#10,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm9,<r4=%ymm5,<r4=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b2 & a1 # asm 1: vpand <b2=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#10,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm9,<r3=%ymm7,<r3=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r2 = b2 & a0 # asm 1: vpand <b2=reg256#14,<a0=reg256#13,>r2=reg256#10 # asm 2: vpand <b2=%ymm13,<a0=%ymm12,>r2=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b1 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b1=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b1=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b1 & a6 # asm 1: vpand 
<b1=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b1=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm14,<r7=%ymm11,<r7=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#12,224(<ptr=int64#5) # asm 2: vmovupd <r7=%ymm11,224(<ptr=%r8) vmovupd % ymm11, 224( % r8) # qhasm: r = b1 & a5 # asm 1: vpand <b1=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#12,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm11,<r6=%ymm0,<r6=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b1 & a4 # asm 1: vpand <b1=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#12,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm11,<r5=%ymm3,<r5=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b1 & a3 # asm 1: vpand <b1=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#12,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm11,<r4=%ymm5,<r4=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b1 & a2 # asm 1: vpand <b1=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#12,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm11,<r3=%ymm7,<r3=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b1 & a1 # asm 1: vpand <b1=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#12,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm11,<r2=%ymm9,<r2=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r1 = b1 & a0 # asm 1: vpand <b1=reg256#14,<a0=reg256#13,>r1=reg256#12 # asm 2: vpand <b1=%ymm13,<a0=%ymm12,>r1=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b0 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b0=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b0=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b0 & a6 # asm 1: vpand <b0=reg256#14,<a6=reg256#2,>r=reg256#2 # asm 2: vpand <b0=%ymm13,<a6=%ymm1,>r=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#2,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm1,<r6=%ymm0,<r6=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<ptr=int64#5) # asm 2: vmovupd <r6=%ymm0,192(<ptr=%r8) vmovupd % ymm0, 192( % r8) # qhasm: r = b0 & a5 # asm 1: vpand <b0=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm0,<r5=%ymm3,<r5=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b0 & a4 # asm 1: vpand <b0=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm0,<r4=%ymm5,<r4=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b0 & a3 # asm 1: vpand <b0=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a3=%ymm6,>r=%ymm0 vpand 
% ymm13, % ymm6, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm0,<r3=%ymm7,<r3=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b0 & a2 # asm 1: vpand <b0=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm0,<r2=%ymm9,<r2=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b0 & a1 # asm 1: vpand <b0=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#12,<r1=reg256#12 # asm 2: vpxor <r=%ymm0,<r1=%ymm11,<r1=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r0 = b0 & a0 # asm 1: vpand <b0=reg256#14,<a0=reg256#13,>r0=reg256#1 # asm 2: vpand <b0=%ymm13,<a0=%ymm12,>r0=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#5) # asm 2: vmovupd <r5=%ymm3,160(<ptr=%r8) vmovupd % ymm3, 160( % r8) # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#6,128(<ptr=int64#5) # asm 2: vmovupd <r4=%ymm5,128(<ptr=%r8) vmovupd % ymm5, 128( % r8) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#8,96(<ptr=int64#5) # asm 2: vmovupd <r3=%ymm7,96(<ptr=%r8) vmovupd % ymm7, 96( % r8) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#10,64(<ptr=int64#5) # asm 2: vmovupd <r2=%ymm9,64(<ptr=%r8) vmovupd % ymm9, 64( % r8) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#12,32(<ptr=int64#5) # asm 2: vmovupd <r1=%ymm11,32(<ptr=%r8) vmovupd % ymm11, 32( % r8) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#5) # asm 2: vmovupd <r0=%ymm0,0(<ptr=%r8) vmovupd % ymm0, 0( % r8) # qhasm: vzeroupper vzeroupper # qhasm: h24 = mem128[ ptr + 560 ] # asm 1: movdqu 560(<ptr=int64#5),>h24=reg128#1 # asm 2: movdqu 560(<ptr=%r8),>h24=%xmm0 movdqu 560( % r8), % xmm0 # qhasm: h11 = h24 # asm 1: movdqa <h24=reg128#1,>h11=reg128#2 # asm 2: movdqa <h24=%xmm0,>h11=%xmm1 movdqa % xmm0, % xmm1 # qhasm: h12 = h24 # asm 1: movdqa <h24=reg128#1,>h12=reg128#3 # asm 2: movdqa <h24=%xmm0,>h12=%xmm2 movdqa % xmm0, % xmm2 # qhasm: h14 = h24 # asm 1: movdqa <h24=reg128#1,>h14=reg128#4 # asm 2: movdqa <h24=%xmm0,>h14=%xmm3 movdqa % xmm0, % xmm3 # qhasm: h15 = h24 # asm 1: movdqa <h24=reg128#1,>h15=reg128#1 # asm 2: movdqa <h24=%xmm0,>h15=%xmm0 movdqa % xmm0, % xmm0 # qhasm: h23 = mem128[ ptr + 528 ] # asm 1: movdqu 528(<ptr=int64#5),>h23=reg128#5 # asm 2: movdqu 528(<ptr=%r8),>h23=%xmm4 movdqu 528( % r8), % xmm4 # qhasm: h10 = h23 # asm 1: movdqa <h23=reg128#5,>h10=reg128#6 # asm 2: movdqa <h23=%xmm4,>h10=%xmm5 movdqa % xmm4, % xmm5 # qhasm: h11 = h11 ^ h23 # asm 1: vpxor <h23=reg128#5,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h23=%xmm4,<h11=%xmm1,>h11=%xmm1 vpxor % xmm4, % xmm1, % xmm1 # qhasm: h13 = h23 # asm 1: movdqa <h23=reg128#5,>h13=reg128#7 # asm 2: movdqa <h23=%xmm4,>h13=%xmm6 movdqa % xmm4, % xmm6 # qhasm: h14 = h14 ^ h23 # asm 1: vpxor <h23=reg128#5,<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor <h23=%xmm4,<h14=%xmm3,>h14=%xmm3 vpxor % xmm4, % xmm3, % xmm3 # qhasm: h22 = mem128[ ptr + 496 ] # asm 1: movdqu 496(<ptr=int64#5),>h22=reg128#5 # asm 2: movdqu 496(<ptr=%r8),>h22=%xmm4 movdqu 496( % r8), % xmm4 # qhasm: h9 = h22 # asm 1: movdqa <h22=reg128#5,>h9=reg128#8 # asm 2: movdqa <h22=%xmm4,>h9=%xmm7 movdqa % xmm4, % xmm7 # qhasm: h10 = h10 ^ h22 # asm 1: vpxor <h22=reg128#5,<h10=reg128#6,>h10=reg128#6 # asm 2: 
vpxor <h22=%xmm4,<h10=%xmm5,>h10=%xmm5 vpxor % xmm4, % xmm5, % xmm5 # qhasm: h12 = h12 ^ h22 # asm 1: vpxor <h22=reg128#5,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h22=%xmm4,<h12=%xmm2,>h12=%xmm2 vpxor % xmm4, % xmm2, % xmm2 # qhasm: h13 = h13 ^ h22 # asm 1: vpxor <h22=reg128#5,<h13=reg128#7,>h13=reg128#5 # asm 2: vpxor <h22=%xmm4,<h13=%xmm6,>h13=%xmm4 vpxor % xmm4, % xmm6, % xmm4 # qhasm: h21 = mem128[ ptr + 464 ] # asm 1: movdqu 464(<ptr=int64#5),>h21=reg128#7 # asm 2: movdqu 464(<ptr=%r8),>h21=%xmm6 movdqu 464( % r8), % xmm6 # qhasm: h8 = h21 # asm 1: movdqa <h21=reg128#7,>h8=reg128#9 # asm 2: movdqa <h21=%xmm6,>h8=%xmm8 movdqa % xmm6, % xmm8 # qhasm: h9 = h9 ^ h21 # asm 1: vpxor <h21=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h21=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h11 = h11 ^ h21 # asm 1: vpxor <h21=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h21=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h12 = h12 ^ h21 # asm 1: vpxor <h21=reg128#7,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h21=%xmm6,<h12=%xmm2,>h12=%xmm2 vpxor % xmm6, % xmm2, % xmm2 # qhasm: h20 = mem128[ ptr + 432 ] # asm 1: movdqu 432(<ptr=int64#5),>h20=reg128#7 # asm 2: movdqu 432(<ptr=%r8),>h20=%xmm6 movdqu 432( % r8), % xmm6 # qhasm: h7 = h20 # asm 1: movdqa <h20=reg128#7,>h7=reg128#10 # asm 2: movdqa <h20=%xmm6,>h7=%xmm9 movdqa % xmm6, % xmm9 # qhasm: h8 = h8 ^ h20 # asm 1: vpxor <h20=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h20=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h10 = h10 ^ h20 # asm 1: vpxor <h20=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h20=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h11 = h11 ^ h20 # asm 1: vpxor <h20=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h20=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h19 = mem128[ ptr + 400 ] # asm 1: movdqu 400(<ptr=int64#5),>h19=reg128#7 # asm 2: movdqu 400(<ptr=%r8),>h19=%xmm6 movdqu 400( % r8), % xmm6 # qhasm: h6 = h19 # asm 1: movdqa <h19=reg128#7,>h6=reg128#11 # asm 2: movdqa <h19=%xmm6,>h6=%xmm10 movdqa % xmm6, % xmm10 # qhasm: h7 = h7 ^ h19 # asm 1: vpxor <h19=reg128#7,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h19=%xmm6,<h7=%xmm9,>h7=%xmm9 vpxor % xmm6, % xmm9, % xmm9 # qhasm: h9 = h9 ^ h19 # asm 1: vpxor <h19=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h19=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h10 = h10 ^ h19 # asm 1: vpxor <h19=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h19=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h18 = mem128[ ptr + 368 ] # asm 1: movdqu 368(<ptr=int64#5),>h18=reg128#7 # asm 2: movdqu 368(<ptr=%r8),>h18=%xmm6 movdqu 368( % r8), % xmm6 # qhasm: h18 = h18 ^ mem128[ ptr + 576 ] # asm 1: vpxor 576(<ptr=int64#5),<h18=reg128#7,>h18=reg128#7 # asm 2: vpxor 576(<ptr=%r8),<h18=%xmm6,>h18=%xmm6 vpxor 576( % r8), % xmm6, % xmm6 # qhasm: h5 = h18 # asm 1: movdqa <h18=reg128#7,>h5=reg128#12 # asm 2: movdqa <h18=%xmm6,>h5=%xmm11 movdqa % xmm6, % xmm11 # qhasm: h6 = h6 ^ h18 # asm 1: vpxor <h18=reg128#7,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h18=%xmm6,<h6=%xmm10,>h6=%xmm10 vpxor % xmm6, % xmm10, % xmm10 # qhasm: h8 = h8 ^ h18 # asm 1: vpxor <h18=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h18=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h9 = h9 ^ h18 # asm 1: vpxor <h18=reg128#7,<h9=reg128#8,>h9=reg128#7 # asm 2: vpxor <h18=%xmm6,<h9=%xmm7,>h9=%xmm6 vpxor % xmm6, % xmm7, % xmm6 # qhasm: h17 = 
mem128[ ptr + 336 ] # asm 1: movdqu 336(<ptr=int64#5),>h17=reg128#8 # asm 2: movdqu 336(<ptr=%r8),>h17=%xmm7 movdqu 336( % r8), % xmm7 # qhasm: h17 = h17 ^ mem128[ ptr + 544 ] # asm 1: vpxor 544(<ptr=int64#5),<h17=reg128#8,>h17=reg128#8 # asm 2: vpxor 544(<ptr=%r8),<h17=%xmm7,>h17=%xmm7 vpxor 544( % r8), % xmm7, % xmm7 # qhasm: h4 = h17 # asm 1: movdqa <h17=reg128#8,>h4=reg128#13 # asm 2: movdqa <h17=%xmm7,>h4=%xmm12 movdqa % xmm7, % xmm12 # qhasm: h5 = h5 ^ h17 # asm 1: vpxor <h17=reg128#8,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h17=%xmm7,<h5=%xmm11,>h5=%xmm11 vpxor % xmm7, % xmm11, % xmm11 # qhasm: h7 = h7 ^ h17 # asm 1: vpxor <h17=reg128#8,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h17=%xmm7,<h7=%xmm9,>h7=%xmm9 vpxor % xmm7, % xmm9, % xmm9 # qhasm: h8 = h8 ^ h17 # asm 1: vpxor <h17=reg128#8,<h8=reg128#9,>h8=reg128#8 # asm 2: vpxor <h17=%xmm7,<h8=%xmm8,>h8=%xmm7 vpxor % xmm7, % xmm8, % xmm7 # qhasm: h16 = mem128[ ptr + 304 ] # asm 1: movdqu 304(<ptr=int64#5),>h16=reg128#9 # asm 2: movdqu 304(<ptr=%r8),>h16=%xmm8 movdqu 304( % r8), % xmm8 # qhasm: h16 = h16 ^ mem128[ ptr + 512 ] # asm 1: vpxor 512(<ptr=int64#5),<h16=reg128#9,>h16=reg128#9 # asm 2: vpxor 512(<ptr=%r8),<h16=%xmm8,>h16=%xmm8 vpxor 512( % r8), % xmm8, % xmm8 # qhasm: h3 = h16 # asm 1: movdqa <h16=reg128#9,>h3=reg128#14 # asm 2: movdqa <h16=%xmm8,>h3=%xmm13 movdqa % xmm8, % xmm13 # qhasm: h4 = h4 ^ h16 # asm 1: vpxor <h16=reg128#9,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor <h16=%xmm8,<h4=%xmm12,>h4=%xmm12 vpxor % xmm8, % xmm12, % xmm12 # qhasm: h6 = h6 ^ h16 # asm 1: vpxor <h16=reg128#9,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h16=%xmm8,<h6=%xmm10,>h6=%xmm10 vpxor % xmm8, % xmm10, % xmm10 # qhasm: h7 = h7 ^ h16 # asm 1: vpxor <h16=reg128#9,<h7=reg128#10,>h7=reg128#9 # asm 2: vpxor <h16=%xmm8,<h7=%xmm9,>h7=%xmm8 vpxor % xmm8, % xmm9, % xmm8 # qhasm: h15 = h15 ^ mem128[ ptr + 272 ] # asm 1: vpxor 272(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 272(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 272( % r8), % xmm0, % xmm0 # qhasm: h15 = h15 ^ mem128[ ptr + 480 ] # asm 1: vpxor 480(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 480(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 480( % r8), % xmm0, % xmm0 # qhasm: h2 = h15 # asm 1: movdqa <h15=reg128#1,>h2=reg128#10 # asm 2: movdqa <h15=%xmm0,>h2=%xmm9 movdqa % xmm0, % xmm9 # qhasm: h3 = h3 ^ h15 # asm 1: vpxor <h15=reg128#1,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h15=%xmm0,<h3=%xmm13,>h3=%xmm13 vpxor % xmm0, % xmm13, % xmm13 # qhasm: h5 = h5 ^ h15 # asm 1: vpxor <h15=reg128#1,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h15=%xmm0,<h5=%xmm11,>h5=%xmm11 vpxor % xmm0, % xmm11, % xmm11 # qhasm: h6 = h6 ^ h15 # asm 1: vpxor <h15=reg128#1,<h6=reg128#11,>h6=reg128#1 # asm 2: vpxor <h15=%xmm0,<h6=%xmm10,>h6=%xmm0 vpxor % xmm0, % xmm10, % xmm0 # qhasm: h14 = h14 ^ mem128[ ptr + 240 ] # asm 1: vpxor 240(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 240(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 240( % r8), % xmm3, % xmm3 # qhasm: h14 = h14 ^ mem128[ ptr + 448 ] # asm 1: vpxor 448(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 448(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 448( % r8), % xmm3, % xmm3 # qhasm: h1 = h14 # asm 1: movdqa <h14=reg128#4,>h1=reg128#11 # asm 2: movdqa <h14=%xmm3,>h1=%xmm10 movdqa % xmm3, % xmm10 # qhasm: h2 = h2 ^ h14 # asm 1: vpxor <h14=reg128#4,<h2=reg128#10,>h2=reg128#10 # asm 2: vpxor <h14=%xmm3,<h2=%xmm9,>h2=%xmm9 vpxor % xmm3, % xmm9, % xmm9 # qhasm: h4 = h4 ^ h14 # asm 1: vpxor <h14=reg128#4,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor 
<h14=%xmm3,<h4=%xmm12,>h4=%xmm12 vpxor % xmm3, % xmm12, % xmm12 # qhasm: h5 = h5 ^ h14 # asm 1: vpxor <h14=reg128#4,<h5=reg128#12,>h5=reg128#4 # asm 2: vpxor <h14=%xmm3,<h5=%xmm11,>h5=%xmm3 vpxor % xmm3, % xmm11, % xmm3 # qhasm: h13 = h13 ^ mem128[ ptr + 208 ] # asm 1: vpxor 208(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 208(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 208( % r8), % xmm4, % xmm4 # qhasm: h13 = h13 ^ mem128[ ptr + 416 ] # asm 1: vpxor 416(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 416(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 416( % r8), % xmm4, % xmm4 # qhasm: h0 = h13 # asm 1: movdqa <h13=reg128#5,>h0=reg128#12 # asm 2: movdqa <h13=%xmm4,>h0=%xmm11 movdqa % xmm4, % xmm11 # qhasm: h1 = h1 ^ h13 # asm 1: vpxor <h13=reg128#5,<h1=reg128#11,>h1=reg128#11 # asm 2: vpxor <h13=%xmm4,<h1=%xmm10,>h1=%xmm10 vpxor % xmm4, % xmm10, % xmm10 # qhasm: h3 = h3 ^ h13 # asm 1: vpxor <h13=reg128#5,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h13=%xmm4,<h3=%xmm13,>h3=%xmm13 vpxor % xmm4, % xmm13, % xmm13 # qhasm: h4 = h4 ^ h13 # asm 1: vpxor <h13=reg128#5,<h4=reg128#13,>h4=reg128#5 # asm 2: vpxor <h13=%xmm4,<h4=%xmm12,>h4=%xmm4 vpxor % xmm4, % xmm12, % xmm4 # qhasm: h12 = h12 ^ mem128[ ptr + 384 ] # asm 1: vpxor 384(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 384(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 384( % r8), % xmm2, % xmm2 # qhasm: h12 = h12 ^ mem128[ ptr + 176 ] # asm 1: vpxor 176(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 176(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 176( % r8), % xmm2, % xmm2 # qhasm: mem128[ input_0 + 192 ] = h12 # asm 1: movdqu <h12=reg128#3,192(<input_0=int64#1) # asm 2: movdqu <h12=%xmm2,192(<input_0=%rdi) movdqu % xmm2, 192( % rdi) # qhasm: h11 = h11 ^ mem128[ ptr + 352 ] # asm 1: vpxor 352(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 352(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 352( % r8), % xmm1, % xmm1 # qhasm: h11 = h11 ^ mem128[ ptr + 144 ] # asm 1: vpxor 144(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 144(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 144( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 176 ] = h11 # asm 1: movdqu <h11=reg128#2,176(<input_0=int64#1) # asm 2: movdqu <h11=%xmm1,176(<input_0=%rdi) movdqu % xmm1, 176( % rdi) # qhasm: h10 = h10 ^ mem128[ ptr + 320 ] # asm 1: vpxor 320(<ptr=int64#5),<h10=reg128#6,>h10=reg128#2 # asm 2: vpxor 320(<ptr=%r8),<h10=%xmm5,>h10=%xmm1 vpxor 320( % r8), % xmm5, % xmm1 # qhasm: h10 = h10 ^ mem128[ ptr + 112 ] # asm 1: vpxor 112(<ptr=int64#5),<h10=reg128#2,>h10=reg128#2 # asm 2: vpxor 112(<ptr=%r8),<h10=%xmm1,>h10=%xmm1 vpxor 112( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 160 ] = h10 # asm 1: movdqu <h10=reg128#2,160(<input_0=int64#1) # asm 2: movdqu <h10=%xmm1,160(<input_0=%rdi) movdqu % xmm1, 160( % rdi) # qhasm: h9 = h9 ^ mem128[ ptr + 288 ] # asm 1: vpxor 288(<ptr=int64#5),<h9=reg128#7,>h9=reg128#2 # asm 2: vpxor 288(<ptr=%r8),<h9=%xmm6,>h9=%xmm1 vpxor 288( % r8), % xmm6, % xmm1 # qhasm: h9 = h9 ^ mem128[ ptr + 80 ] # asm 1: vpxor 80(<ptr=int64#5),<h9=reg128#2,>h9=reg128#2 # asm 2: vpxor 80(<ptr=%r8),<h9=%xmm1,>h9=%xmm1 vpxor 80( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 144 ] = h9 # asm 1: movdqu <h9=reg128#2,144(<input_0=int64#1) # asm 2: movdqu <h9=%xmm1,144(<input_0=%rdi) movdqu % xmm1, 144( % rdi) # qhasm: h8 = h8 ^ mem128[ ptr + 256 ] # asm 1: vpxor 256(<ptr=int64#5),<h8=reg128#8,>h8=reg128#2 # asm 2: vpxor 256(<ptr=%r8),<h8=%xmm7,>h8=%xmm1 vpxor 256( % r8), % xmm7, % xmm1 # qhasm: h8 = h8 ^ mem128[ ptr + 48 ] # asm 1: vpxor 
48(<ptr=int64#5),<h8=reg128#2,>h8=reg128#2 # asm 2: vpxor 48(<ptr=%r8),<h8=%xmm1,>h8=%xmm1 vpxor 48( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 128 ] = h8 # asm 1: movdqu <h8=reg128#2,128(<input_0=int64#1) # asm 2: movdqu <h8=%xmm1,128(<input_0=%rdi) movdqu % xmm1, 128( % rdi) # qhasm: h7 = h7 ^ mem128[ ptr + 224 ] # asm 1: vpxor 224(<ptr=int64#5),<h7=reg128#9,>h7=reg128#2 # asm 2: vpxor 224(<ptr=%r8),<h7=%xmm8,>h7=%xmm1 vpxor 224( % r8), % xmm8, % xmm1 # qhasm: h7 = h7 ^ mem128[ ptr + 16 ] # asm 1: vpxor 16(<ptr=int64#5),<h7=reg128#2,>h7=reg128#2 # asm 2: vpxor 16(<ptr=%r8),<h7=%xmm1,>h7=%xmm1 vpxor 16( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 112 ] = h7 # asm 1: movdqu <h7=reg128#2,112(<input_0=int64#1) # asm 2: movdqu <h7=%xmm1,112(<input_0=%rdi) movdqu % xmm1, 112( % rdi) # qhasm: h6 = h6 ^ mem128[ ptr + 192 ] # asm 1: vpxor 192(<ptr=int64#5),<h6=reg128#1,>h6=reg128#1 # asm 2: vpxor 192(<ptr=%r8),<h6=%xmm0,>h6=%xmm0 vpxor 192( % r8), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 96 ] = h6 # asm 1: movdqu <h6=reg128#1,96(<input_0=int64#1) # asm 2: movdqu <h6=%xmm0,96(<input_0=%rdi) movdqu % xmm0, 96( % rdi) # qhasm: h5 = h5 ^ mem128[ ptr + 160 ] # asm 1: vpxor 160(<ptr=int64#5),<h5=reg128#4,>h5=reg128#1 # asm 2: vpxor 160(<ptr=%r8),<h5=%xmm3,>h5=%xmm0 vpxor 160( % r8), % xmm3, % xmm0 # qhasm: mem128[ input_0 + 80 ] = h5 # asm 1: movdqu <h5=reg128#1,80(<input_0=int64#1) # asm 2: movdqu <h5=%xmm0,80(<input_0=%rdi) movdqu % xmm0, 80( % rdi) # qhasm: h4 = h4 ^ mem128[ ptr + 128 ] # asm 1: vpxor 128(<ptr=int64#5),<h4=reg128#5,>h4=reg128#1 # asm 2: vpxor 128(<ptr=%r8),<h4=%xmm4,>h4=%xmm0 vpxor 128( % r8), % xmm4, % xmm0 # qhasm: mem128[ input_0 + 64 ] = h4 # asm 1: movdqu <h4=reg128#1,64(<input_0=int64#1) # asm 2: movdqu <h4=%xmm0,64(<input_0=%rdi) movdqu % xmm0, 64( % rdi) # qhasm: h3 = h3 ^ mem128[ ptr + 96 ] # asm 1: vpxor 96(<ptr=int64#5),<h3=reg128#14,>h3=reg128#1 # asm 2: vpxor 96(<ptr=%r8),<h3=%xmm13,>h3=%xmm0 vpxor 96( % r8), % xmm13, % xmm0 # qhasm: mem128[ input_0 + 48 ] = h3 # asm 1: movdqu <h3=reg128#1,48(<input_0=int64#1) # asm 2: movdqu <h3=%xmm0,48(<input_0=%rdi) movdqu % xmm0, 48( % rdi) # qhasm: h2 = h2 ^ mem128[ ptr + 64 ] # asm 1: vpxor 64(<ptr=int64#5),<h2=reg128#10,>h2=reg128#1 # asm 2: vpxor 64(<ptr=%r8),<h2=%xmm9,>h2=%xmm0 vpxor 64( % r8), % xmm9, % xmm0 # qhasm: mem128[ input_0 + 32 ] = h2 # asm 1: movdqu <h2=reg128#1,32(<input_0=int64#1) # asm 2: movdqu <h2=%xmm0,32(<input_0=%rdi) movdqu % xmm0, 32( % rdi) # qhasm: h1 = h1 ^ mem128[ ptr + 32 ] # asm 1: vpxor 32(<ptr=int64#5),<h1=reg128#11,>h1=reg128#1 # asm 2: vpxor 32(<ptr=%r8),<h1=%xmm10,>h1=%xmm0 vpxor 32( % r8), % xmm10, % xmm0 # qhasm: mem128[ input_0 + 16 ] = h1 # asm 1: movdqu <h1=reg128#1,16(<input_0=int64#1) # asm 2: movdqu <h1=%xmm0,16(<input_0=%rdi) movdqu % xmm0, 16( % rdi) # qhasm: h0 = h0 ^ mem128[ ptr + 0 ] # asm 1: vpxor 0(<ptr=int64#5),<h0=reg128#12,>h0=reg128#1 # asm 2: vpxor 0(<ptr=%r8),<h0=%xmm11,>h0=%xmm0 vpxor 0( % r8), % xmm11, % xmm0 # qhasm: mem128[ input_0 + 0 ] = h0 # asm 1: movdqu <h0=reg128#1,0(<input_0=int64#1) # asm 2: movdqu <h0=%xmm0,0(<input_0=%rdi) movdqu % xmm0, 0( % rdi) # qhasm: return add % r11, % rsp ret
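Note: the qhasm routine ending above is the tail of a bitsliced field multiplication. The vpand/vpxor ladder forms the schoolbook product of two bitsliced field elements (AND is a GF(2) multiply, XOR is a GF(2) add), and the h0..h24 folding step reduces the 25 partial-product bit-planes back to 13. The modulus is not stated in the file, but the fold pattern visible above (h[13+j] is XORed into h[j+4], h[j+3], h[j+1], h[j], e.g. h24 into h15, h14, h12, h11) matches x^13 + x^4 + x^3 + x + 1. The following is a minimal C sketch of that computation under those inferred parameters — an illustration, not the repo's code; uint64_t stands in for the 128-bit lanes.

#include <stdint.h>

#define GFBITS 13   /* inferred from the 13 outputs h0..h12 stored above */

void bitsliced_gf_mul(uint64_t h[GFBITS],
                      const uint64_t a[GFBITS],
                      const uint64_t b[GFBITS])
{
    uint64_t buf[2 * GFBITS - 1];
    int i, j;

    /* schoolbook product: AND multiplies, XOR accumulates (the
       vpand/vpxor pairs in the assembly above) */
    for (i = 0; i < 2 * GFBITS - 1; i++)
        buf[i] = 0;
    for (i = 0; i < GFBITS; i++)
        for (j = 0; j < GFBITS; j++)
            buf[i + j] ^= a[i] & b[j];

    /* reduction, assuming x^13 = x^4 + x^3 + x + 1 as inferred from
       the h-folding offsets in the assembly */
    for (i = 2 * GFBITS - 2; i >= GFBITS; i--) {
        buf[i - GFBITS + 4] ^= buf[i];
        buf[i - GFBITS + 3] ^= buf[i];
        buf[i - GFBITS + 1] ^= buf[i];
        buf[i - GFBITS]     ^= buf[i];
    }

    for (i = 0; i < GFBITS; i++)
        h[i] = buf[i];
}

Iterating the reduction loop downward matters: folding h24 first deposits its contribution into buf[15], which is then itself folded when the loop reaches i = 15, exactly as the assembly's h15/h14 chains do.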
mktmansour/MKT-KSA-Geolocation-Security
11,545
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128f/avx2/vec_reduce_asm.S
#include "namespace.h" #define vec_reduce_asm CRYPTO_NAMESPACE(vec_reduce_asm) #define _vec_reduce_asm _CRYPTO_NAMESPACE(vec_reduce_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 t0 # qhasm: int64 t1 # qhasm: int64 c # qhasm: int64 r # qhasm: enter vec_reduce_asm .p2align 5 .global _vec_reduce_asm .global vec_reduce_asm _vec_reduce_asm: vec_reduce_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: r = 0 # asm 1: mov $0,>r=int64#7 # asm 2: mov $0,>r=%rax mov $0, % rax # qhasm: t0 = mem64[ input_0 + 192 ] # asm 1: movq 192(<input_0=int64#1),>t0=int64#2 # asm 2: movq 192(<input_0=%rdi),>t0=%rsi movq 192( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 200 ] # asm 1: movq 200(<input_0=int64#1),>t1=int64#3 # asm 2: movq 200(<input_0=%rdi),>t1=%rdx movq 200( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 176 ] # asm 1: movq 176(<input_0=int64#1),>t0=int64#2 # asm 2: movq 176(<input_0=%rdi),>t0=%rsi movq 176( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 184 ] # asm 1: movq 184(<input_0=int64#1),>t1=int64#3 # asm 2: movq 184(<input_0=%rdi),>t1=%rdx movq 184( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 160 ] # asm 1: movq 160(<input_0=int64#1),>t0=int64#2 # asm 2: movq 160(<input_0=%rdi),>t0=%rsi movq 160( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 168 ] # asm 1: movq 168(<input_0=int64#1),>t1=int64#3 # asm 2: movq 168(<input_0=%rdi),>t1=%rdx movq 168( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 144 ] # asm 1: movq 144(<input_0=int64#1),>t0=int64#2 # asm 2: movq 144(<input_0=%rdi),>t0=%rsi movq 144( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 152 ] # asm 1: movq 152(<input_0=int64#1),>t1=int64#3 # asm 2: movq 152(<input_0=%rdi),>t1=%rdx movq 152( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor 
<t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 128 ] # asm 1: movq 128(<input_0=int64#1),>t0=int64#2 # asm 2: movq 128(<input_0=%rdi),>t0=%rsi movq 128( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 136 ] # asm 1: movq 136(<input_0=int64#1),>t1=int64#3 # asm 2: movq 136(<input_0=%rdi),>t1=%rdx movq 136( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 112 ] # asm 1: movq 112(<input_0=int64#1),>t0=int64#2 # asm 2: movq 112(<input_0=%rdi),>t0=%rsi movq 112( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 120 ] # asm 1: movq 120(<input_0=int64#1),>t1=int64#3 # asm 2: movq 120(<input_0=%rdi),>t1=%rdx movq 120( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 96 ] # asm 1: movq 96(<input_0=int64#1),>t0=int64#2 # asm 2: movq 96(<input_0=%rdi),>t0=%rsi movq 96( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 104 ] # asm 1: movq 104(<input_0=int64#1),>t1=int64#3 # asm 2: movq 104(<input_0=%rdi),>t1=%rdx movq 104( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 80 ] # asm 1: movq 80(<input_0=int64#1),>t0=int64#2 # asm 2: movq 80(<input_0=%rdi),>t0=%rsi movq 80( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 88 ] # asm 1: movq 88(<input_0=int64#1),>t1=int64#3 # asm 2: movq 88(<input_0=%rdi),>t1=%rdx movq 88( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or 
<c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 64 ] # asm 1: movq 64(<input_0=int64#1),>t0=int64#2 # asm 2: movq 64(<input_0=%rdi),>t0=%rsi movq 64( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 72 ] # asm 1: movq 72(<input_0=int64#1),>t1=int64#3 # asm 2: movq 72(<input_0=%rdi),>t1=%rdx movq 72( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 48 ] # asm 1: movq 48(<input_0=int64#1),>t0=int64#2 # asm 2: movq 48(<input_0=%rdi),>t0=%rsi movq 48( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 56 ] # asm 1: movq 56(<input_0=int64#1),>t1=int64#3 # asm 2: movq 56(<input_0=%rdi),>t1=%rdx movq 56( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 32 ] # asm 1: movq 32(<input_0=int64#1),>t0=int64#2 # asm 2: movq 32(<input_0=%rdi),>t0=%rsi movq 32( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 40 ] # asm 1: movq 40(<input_0=int64#1),>t1=int64#3 # asm 2: movq 40(<input_0=%rdi),>t1=%rdx movq 40( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 16 ] # asm 1: movq 16(<input_0=int64#1),>t0=int64#2 # asm 2: movq 16(<input_0=%rdi),>t0=%rsi movq 16( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 24 ] # asm 1: movq 24(<input_0=int64#1),>t1=int64#3 # asm 2: movq 24(<input_0=%rdi),>t1=%rdx movq 24( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>t0=int64#2 # asm 2: movq 0(<input_0=%rdi),>t0=%rsi movq 0( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>t1=int64#1 # asm 2: movq 8(<input_0=%rdi),>t1=%rdi movq 8( % rdi), % rdi # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#1,<t0=int64#2 # asm 2: xor 
<t1=%rdi,<t0=%rsi xor % rdi, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#1 # asm 2: popcnt <t0=%rsi, >c=%rdi popcnt % rsi, % rdi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#1d # asm 2: and $1,<c=%edi and $1, % edi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#1,<r=int64#7 # asm 2: or <c=%rdi,<r=%rax or % rdi, % rax # qhasm: return r add % r11, % rsp ret
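vec_reduce_asm above is fully unrolled but very regular: for each of 13 bit-planes, stored as 128-bit limbs 16 bytes apart (offsets 192 down to 0), it XORs the two 64-bit halves, takes the parity via popcnt ... & 1, and shifts the bit into the accumulator, so plane 0 lands in the least significant bit of the returned value. A minimal C sketch of the same fold follows; the function name and the parity helper are illustrative, not the repo's API.

#include <stdint.h>

/* portable stand-in for "popcnt; and $1" in the assembly */
static int parity64(uint64_t x)
{
    x ^= x >> 32; x ^= x >> 16; x ^= x >> 8;
    x ^= x >> 4;  x ^= x >> 2;  x ^= x >> 1;
    return (int)(x & 1);
}

uint16_t vec_reduce_sketch(const uint64_t v[26])  /* 13 planes x 2 words */
{
    uint16_t r = 0;
    int i;

    /* the assembly reads offsets 192, 176, ..., 0; shifting left each
       iteration puts plane i at bit position i of the result */
    for (i = 12; i >= 0; i--)
        r = (uint16_t)((r << 1) | parity64(v[2 * i] ^ v[2 * i + 1]));

    return r;
}

In effect the routine collapses a bitsliced sum of 128 field elements into a single 13-bit element whose bit i is the parity of bit-plane i.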
mktmansour/MKT-KSA-Geolocation-Security
26,219
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128f/avx2/syndrome_asm.S
#include "namespace.h" #define syndrome_asm CRYPTO_NAMESPACE(syndrome_asm) #define _syndrome_asm _CRYPTO_NAMESPACE(syndrome_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 b64 # qhasm: int64 synd # qhasm: int64 addr # qhasm: int64 c # qhasm: int64 c_all # qhasm: int64 row # qhasm: int64 p # qhasm: int64 e # qhasm: int64 s # qhasm: reg256 pp # qhasm: reg256 ee # qhasm: reg256 ss # qhasm: int64 buf_ptr # qhasm: stack256 buf # qhasm: enter syndrome_asm .p2align 5 .global _syndrome_asm .global syndrome_asm _syndrome_asm: syndrome_asm: mov % rsp, % r11 and $31, % r11 add $32, % r11 sub % r11, % rsp # qhasm: input_1 += 1357008 # asm 1: add $1357008,<input_1=int64#2 # asm 2: add $1357008,<input_1=%rsi add $1357008, % rsi # qhasm: buf_ptr = &buf # asm 1: leaq <buf=stack256#1,>buf_ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>buf_ptr=%rcx leaq 0( % rsp), % rcx # qhasm: row = 1664 # asm 1: mov $1664,>row=int64#5 # asm 2: mov $1664,>row=%r8 mov $1664, % r8 # qhasm: loop: ._loop: # qhasm: row -= 1 # asm 1: sub $1,<row=int64#5 # asm 2: sub $1,<row=%r8 sub $1, % r8 # qhasm: ss = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>ss=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>ss=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: ee = mem256[ input_2 + 208 ] # asm 1: vmovupd 208(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 208(<input_2=%rdx),>ee=%ymm1 vmovupd 208( % rdx), % ymm1 # qhasm: ss &= ee # asm 1: vpand <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpand <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpand % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 32(<input_1=%rsi),>pp=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 240 ] # asm 1: vmovupd 240(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 240(<input_2=%rdx),>ee=%ymm2 vmovupd 240( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 64(<input_1=%rsi),>pp=%ymm1 vmovupd 64( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 272 ] # asm 1: vmovupd 272(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 272(<input_2=%rdx),>ee=%ymm2 vmovupd 272( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 96(<input_1=%rsi),>pp=%ymm1 vmovupd 96( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 304 ] # asm 1: vmovupd 304(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 304(<input_2=%rdx),>ee=%ymm2 vmovupd 304( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % 
ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 128 ]
# asm 1: vmovupd 128(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 128(<input_1=%rsi),>pp=%ymm1
vmovupd 128(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 336 ]
# asm 1: vmovupd 336(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 336(<input_2=%rdx),>ee=%ymm2
vmovupd 336(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 160 ]
# asm 1: vmovupd 160(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 160(<input_1=%rsi),>pp=%ymm1
vmovupd 160(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 368 ]
# asm 1: vmovupd 368(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 368(<input_2=%rdx),>ee=%ymm2
vmovupd 368(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 192 ]
# asm 1: vmovupd 192(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 192(<input_1=%rsi),>pp=%ymm1
vmovupd 192(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 400 ]
# asm 1: vmovupd 400(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 400(<input_2=%rdx),>ee=%ymm2
vmovupd 400(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 224 ]
# asm 1: vmovupd 224(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 224(<input_1=%rsi),>pp=%ymm1
vmovupd 224(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 432 ]
# asm 1: vmovupd 432(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 432(<input_2=%rdx),>ee=%ymm2
vmovupd 432(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 256 ]
# asm 1: vmovupd 256(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 256(<input_1=%rsi),>pp=%ymm1
vmovupd 256(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 464 ]
# asm 1: vmovupd 464(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 464(<input_2=%rdx),>ee=%ymm2
vmovupd 464(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 288 ]
# asm 1: vmovupd 288(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 288(<input_1=%rsi),>pp=%ymm1
vmovupd 288(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 496 ]
# asm 1: vmovupd 496(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 496(<input_2=%rdx),>ee=%ymm2
vmovupd 496(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 320 ]
# asm 1: vmovupd 320(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 320(<input_1=%rsi),>pp=%ymm1
vmovupd 320(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 528 ]
# asm 1: vmovupd 528(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 528(<input_2=%rdx),>ee=%ymm2
vmovupd 528(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 352 ]
# asm 1: vmovupd 352(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 352(<input_1=%rsi),>pp=%ymm1
vmovupd 352(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 560 ]
# asm 1: vmovupd 560(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 560(<input_2=%rdx),>ee=%ymm2
vmovupd 560(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 384 ]
# asm 1: vmovupd 384(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 384(<input_1=%rsi),>pp=%ymm1
vmovupd 384(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 592 ]
# asm 1: vmovupd 592(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 592(<input_2=%rdx),>ee=%ymm2
vmovupd 592(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 416 ]
# asm 1: vmovupd 416(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 416(<input_1=%rsi),>pp=%ymm1
vmovupd 416(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 624 ]
# asm 1: vmovupd 624(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 624(<input_2=%rdx),>ee=%ymm2
vmovupd 624(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 448 ]
# asm 1: vmovupd 448(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 448(<input_1=%rsi),>pp=%ymm1
vmovupd 448(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 656 ]
# asm 1: vmovupd 656(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 656(<input_2=%rdx),>ee=%ymm2
vmovupd 656(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 480 ]
# asm 1: vmovupd 480(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 480(<input_1=%rsi),>pp=%ymm1
vmovupd 480(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 688 ]
# asm 1: vmovupd 688(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 688(<input_2=%rdx),>ee=%ymm2
vmovupd 688(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 512 ]
# asm 1: vmovupd 512(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 512(<input_1=%rsi),>pp=%ymm1
vmovupd 512(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 720 ]
# asm 1: vmovupd 720(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 720(<input_2=%rdx),>ee=%ymm2
vmovupd 720(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 544 ]
# asm 1: vmovupd 544(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 544(<input_1=%rsi),>pp=%ymm1
vmovupd 544(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 752 ]
# asm 1: vmovupd 752(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 752(<input_2=%rdx),>ee=%ymm2
vmovupd 752(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 576 ]
# asm 1: vmovupd 576(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 576(<input_1=%rsi),>pp=%ymm1
vmovupd 576(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 784 ]
# asm 1: vmovupd 784(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 784(<input_2=%rdx),>ee=%ymm2
vmovupd 784(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 608 ]
# asm 1: vmovupd 608(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 608(<input_1=%rsi),>pp=%ymm1
vmovupd 608(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 816 ]
# asm 1: vmovupd 816(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 816(<input_2=%rdx),>ee=%ymm2
vmovupd 816(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 640 ]
# asm 1: vmovupd 640(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 640(<input_1=%rsi),>pp=%ymm1
vmovupd 640(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 848 ]
# asm 1: vmovupd 848(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 848(<input_2=%rdx),>ee=%ymm2
vmovupd 848(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 672 ]
# asm 1: vmovupd 672(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 672(<input_1=%rsi),>pp=%ymm1
vmovupd 672(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 880 ]
# asm 1: vmovupd 880(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 880(<input_2=%rdx),>ee=%ymm2
vmovupd 880(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 704 ]
# asm 1: vmovupd 704(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 704(<input_1=%rsi),>pp=%ymm1
vmovupd 704(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 912 ]
# asm 1: vmovupd 912(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 912(<input_2=%rdx),>ee=%ymm2
vmovupd 912(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 736 ]
# asm 1: vmovupd 736(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 736(<input_1=%rsi),>pp=%ymm1
vmovupd 736(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 944 ]
# asm 1: vmovupd 944(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 944(<input_2=%rdx),>ee=%ymm2
vmovupd 944(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: pp = mem256[ input_1 + 768 ]
# asm 1: vmovupd 768(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 768(<input_1=%rsi),>pp=%ymm1
vmovupd 768(%rsi), %ymm1

# qhasm: ee = mem256[ input_2 + 976 ]
# asm 1: vmovupd 976(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 976(<input_2=%rdx),>ee=%ymm2
vmovupd 976(%rdx), %ymm2

# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2, %ymm1, %ymm1

# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: buf = ss
# asm 1: vmovapd <ss=reg256#1,>buf=stack256#1
# asm 2: vmovapd <ss=%ymm0,>buf=0(%rsp)
vmovapd %ymm0, 0(%rsp)

# qhasm: s = mem64[input_1 + 800]
# asm 1: movq 800(<input_1=int64#2),>s=int64#6
# asm 2: movq 800(<input_1=%rsi),>s=%r9
movq 800(%rsi), %r9

# qhasm: e = mem64[input_2 + 1008]
# asm 1: movq 1008(<input_2=int64#3),>e=int64#7
# asm 2: movq 1008(<input_2=%rdx),>e=%rax
movq 1008(%rdx), %rax

# qhasm: s &= e
# asm 1: and <e=int64#7,<s=int64#6
# asm 2: and <e=%rax,<s=%r9
and %rax, %r9

# qhasm: p = mem64[input_1 + 808]
# asm 1: movq 808(<input_1=int64#2),>p=int64#7
# asm 2: movq 808(<input_1=%rsi),>p=%rax
movq 808(%rsi), %rax

# qhasm: e = mem64[input_2 + 1016]
# asm 1: movq 1016(<input_2=int64#3),>e=int64#8
# asm 2: movq 1016(<input_2=%rdx),>e=%r10
movq 1016(%rdx), %r10

# qhasm: p &= e
# asm 1: and <e=int64#8,<p=int64#7
# asm 2: and <e=%r10,<p=%rax
and %r10, %rax

# qhasm: s ^= p
# asm 1: xor <p=int64#7,<s=int64#6
# asm 2: xor <p=%rax,<s=%r9
xor %rax, %r9
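# comment: the unrolled loads/ANDs/XORs above accumulate, in %ymm0 and %r9,
# comment: the bitwise products of one matrix row (input_1) with the error
# comment: vector (input_2).  The popcnt block below appears to reduce that
# comment: accumulator to a single GF(2) dot-product bit.  Illustrative C
# comment: sketch under that reading (variable names are ours, not pqclean's):
# comment:
# comment:   uint64_t p = __builtin_popcountll(tail);  /* scalar tail, %r9 */
# comment:   for (int i = 0; i < 4; i++)               /* 4 lanes of buf   */
# comment:       p ^= __builtin_popcountll(buf[i]);
# comment:   bit = p & 1;                              /* one syndrome bit */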
# qhasm: c_all = count(s)
# asm 1: popcnt <s=int64#6, >c_all=int64#6
# asm 2: popcnt <s=%r9, >c_all=%r9
popcnt %r9, %r9

# qhasm: b64 = mem64[ buf_ptr + 0 ]
# asm 1: movq 0(<buf_ptr=int64#4),>b64=int64#7
# asm 2: movq 0(<buf_ptr=%rcx),>b64=%rax
movq 0(%rcx), %rax

# qhasm: c = count(b64)
# asm 1: popcnt <b64=int64#7, >c=int64#7
# asm 2: popcnt <b64=%rax, >c=%rax
popcnt %rax, %rax

# qhasm: c_all ^= c
# asm 1: xor <c=int64#7,<c_all=int64#6
# asm 2: xor <c=%rax,<c_all=%r9
xor %rax, %r9

# qhasm: b64 = mem64[ buf_ptr + 8 ]
# asm 1: movq 8(<buf_ptr=int64#4),>b64=int64#7
# asm 2: movq 8(<buf_ptr=%rcx),>b64=%rax
movq 8(%rcx), %rax

# qhasm: c = count(b64)
# asm 1: popcnt <b64=int64#7, >c=int64#7
# asm 2: popcnt <b64=%rax, >c=%rax
popcnt %rax, %rax

# qhasm: c_all ^= c
# asm 1: xor <c=int64#7,<c_all=int64#6
# asm 2: xor <c=%rax,<c_all=%r9
xor %rax, %r9

# qhasm: b64 = mem64[ buf_ptr + 16 ]
# asm 1: movq 16(<buf_ptr=int64#4),>b64=int64#7
# asm 2: movq 16(<buf_ptr=%rcx),>b64=%rax
movq 16(%rcx), %rax

# qhasm: c = count(b64)
# asm 1: popcnt <b64=int64#7, >c=int64#7
# asm 2: popcnt <b64=%rax, >c=%rax
popcnt %rax, %rax

# qhasm: c_all ^= c
# asm 1: xor <c=int64#7,<c_all=int64#6
# asm 2: xor <c=%rax,<c_all=%r9
xor %rax, %r9

# qhasm: b64 = mem64[ buf_ptr + 24 ]
# asm 1: movq 24(<buf_ptr=int64#4),>b64=int64#7
# asm 2: movq 24(<buf_ptr=%rcx),>b64=%rax
movq 24(%rcx), %rax

# qhasm: c = count(b64)
# asm 1: popcnt <b64=int64#7, >c=int64#7
# asm 2: popcnt <b64=%rax, >c=%rax
popcnt %rax, %rax

# qhasm: c_all ^= c
# asm 1: xor <c=int64#7,<c_all=int64#6
# asm 2: xor <c=%rax,<c_all=%r9
xor %rax, %r9

# qhasm: addr = row
# asm 1: mov <row=int64#5,>addr=int64#7
# asm 2: mov <row=%r8,>addr=%rax
mov %r8, %rax

# qhasm: (uint64) addr >>= 3
# asm 1: shr $3,<addr=int64#7
# asm 2: shr $3,<addr=%rax
shr $3, %rax

# qhasm: addr += input_0
# asm 1: add <input_0=int64#1,<addr=int64#7
# asm 2: add <input_0=%rdi,<addr=%rax
add %rdi, %rax

# qhasm: synd = *(uint8 *) (addr + 0)
# asm 1: movzbq 0(<addr=int64#7),>synd=int64#8
# asm 2: movzbq 0(<addr=%rax),>synd=%r10
movzbq 0(%rax), %r10

# qhasm: synd <<= 1
# asm 1: shl $1,<synd=int64#8
# asm 2: shl $1,<synd=%r10
shl $1, %r10

# qhasm: (uint32) c_all &= 1
# asm 1: and $1,<c_all=int64#6d
# asm 2: and $1,<c_all=%r9d
and $1, %r9d

# qhasm: synd |= c_all
# asm 1: or <c_all=int64#6,<synd=int64#8
# asm 2: or <c_all=%r9,<synd=%r10
or %r9, %r10

# qhasm: *(uint8 *) (addr + 0) = synd
# asm 1: movb <synd=int64#8b,0(<addr=int64#7)
# asm 2: movb <synd=%r10b,0(<addr=%rax)
movb %r10b, 0(%rax)

# qhasm: input_1 -= 816
# asm 1: sub $816,<input_1=int64#2
# asm 2: sub $816,<input_1=%rsi
sub $816, %rsi
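# comment: at this point one syndrome bit has been shifted into the output
# comment: byte addressed via row/8, and input_1 has been stepped back by
# comment: 816 bytes (one row); the compare-and-branch below presumably
# comment: closes the per-row loop entered earlier in the file at ._loop.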
# qhasm: =? row-0
# asm 1: cmp $0,<row=int64#5
# asm 2: cmp $0,<row=%r8
cmp $0, %r8

# comment:fp stack unchanged by jump

# qhasm: goto loop if !=
jne ._loop

# qhasm: ss = mem256[ input_0 + 0 ]
# asm 1: vmovupd 0(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 0(<input_0=%rdi),>ss=%ymm0
vmovupd 0(%rdi), %ymm0

# qhasm: ee = mem256[ input_2 + 0 ]
# asm 1: vmovupd 0(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 0(<input_2=%rdx),>ee=%ymm1
vmovupd 0(%rdx), %ymm1

# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: mem256[ input_0 + 0 ] = ss
# asm 1: vmovupd <ss=reg256#1,0(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,0(<input_0=%rdi)
vmovupd %ymm0, 0(%rdi)

# qhasm: ss = mem256[ input_0 + 32 ]
# asm 1: vmovupd 32(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 32(<input_0=%rdi),>ss=%ymm0
vmovupd 32(%rdi), %ymm0

# qhasm: ee = mem256[ input_2 + 32 ]
# asm 1: vmovupd 32(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 32(<input_2=%rdx),>ee=%ymm1
vmovupd 32(%rdx), %ymm1

# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: mem256[ input_0 + 32 ] = ss
# asm 1: vmovupd <ss=reg256#1,32(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,32(<input_0=%rdi)
vmovupd %ymm0, 32(%rdi)

# qhasm: ss = mem256[ input_0 + 64 ]
# asm 1: vmovupd 64(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 64(<input_0=%rdi),>ss=%ymm0
vmovupd 64(%rdi), %ymm0

# qhasm: ee = mem256[ input_2 + 64 ]
# asm 1: vmovupd 64(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 64(<input_2=%rdx),>ee=%ymm1
vmovupd 64(%rdx), %ymm1

# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: mem256[ input_0 + 64 ] = ss
# asm 1: vmovupd <ss=reg256#1,64(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,64(<input_0=%rdi)
vmovupd %ymm0, 64(%rdi)

# qhasm: ss = mem256[ input_0 + 96 ]
# asm 1: vmovupd 96(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 96(<input_0=%rdi),>ss=%ymm0
vmovupd 96(%rdi), %ymm0

# qhasm: ee = mem256[ input_2 + 96 ]
# asm 1: vmovupd 96(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 96(<input_2=%rdx),>ee=%ymm1
vmovupd 96(%rdx), %ymm1

# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: mem256[ input_0 + 96 ] = ss
# asm 1: vmovupd <ss=reg256#1,96(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,96(<input_0=%rdi)
vmovupd %ymm0, 96(%rdi)

# qhasm: ss = mem256[ input_0 + 128 ]
# asm 1: vmovupd 128(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 128(<input_0=%rdi),>ss=%ymm0
vmovupd 128(%rdi), %ymm0

# qhasm: ee = mem256[ input_2 + 128 ]
# asm 1: vmovupd 128(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 128(<input_2=%rdx),>ee=%ymm1
vmovupd 128(%rdx), %ymm1

# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: mem256[ input_0 + 128 ] = ss
# asm 1: vmovupd <ss=reg256#1,128(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,128(<input_0=%rdi)
vmovupd %ymm0, 128(%rdi)

# qhasm: ss = mem256[ input_0 + 160 ]
# asm 1: vmovupd 160(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 160(<input_0=%rdi),>ss=%ymm0
vmovupd 160(%rdi), %ymm0

# qhasm: ee = mem256[ input_2 + 160 ]
# asm 1: vmovupd 160(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 160(<input_2=%rdx),>ee=%ymm1
vmovupd 160(%rdx), %ymm1

# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1, %ymm0, %ymm0

# qhasm: mem256[ input_0 + 160 ] = ss
# asm 1: vmovupd <ss=reg256#1,160(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,160(<input_0=%rdi)
vmovupd %ymm0, 160(%rdi)

# qhasm: s = mem64[ input_0 + 192 ]
# asm 1: movq 192(<input_0=int64#1),>s=int64#2
# asm 2: movq 192(<input_0=%rdi),>s=%rsi
movq 192(%rdi), %rsi

# qhasm: e = mem64[ input_2 + 192 ]
# asm 1: movq 192(<input_2=int64#3),>e=int64#4
# asm 2: movq 192(<input_2=%rdx),>e=%rcx
movq 192(%rdx), %rcx

# qhasm: s ^= e
# asm 1: xor <e=int64#4,<s=int64#2
# asm 2: xor <e=%rcx,<s=%rsi
xor %rcx, %rsi

# qhasm: mem64[ input_0 + 192 ] = s
# asm 1: movq <s=int64#2,192(<input_0=int64#1)
# asm 2: movq <s=%rsi,192(<input_0=%rdi)
movq %rsi, 192(%rdi)

# qhasm: s = mem64[ input_0 + 200 ]
# asm 1: movq 200(<input_0=int64#1),>s=int64#2
# asm 2: movq 200(<input_0=%rdi),>s=%rsi
movq 200(%rdi), %rsi

# qhasm: e = mem64[ input_2 + 200 ]
# asm 1: movq 200(<input_2=int64#3),>e=int64#3
# asm 2: movq 200(<input_2=%rdx),>e=%rdx
movq 200(%rdx), %rdx

# qhasm: s ^= e
# asm 1: xor <e=int64#3,<s=int64#2
# asm 2: xor <e=%rdx,<s=%rsi
xor %rdx, %rsi

# qhasm: mem64[ input_0 + 200 ] = s
# asm 1: movq <s=int64#2,200(<input_0=int64#1)
# asm 2: movq <s=%rsi,200(<input_0=%rdi)
movq %rsi, 200(%rdi)

# qhasm: return
add %r11, %rsp
ret
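# comment: the unrolled tail above XORs the trailing part of the error
# comment: vector onto the syndrome: six 32-byte lanes plus two 8-byte
# comment: words, i.e. 6*32 + 2*8 = 208 bytes in all.  Illustrative C
# comment: equivalent (names are ours):
# comment:
# comment:   for (size_t i = 0; i < 208; i++)
# comment:       s[i] ^= e[i];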
mktmansour/MKT-KSA-Geolocation-Security
254,430
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128f/avx2/transpose_64x128_sp_asm.S
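# comment: the routine below transposes a 64x128-bit matrix in place using
# comment: the classic masked butterfly network: at each level, halves of
# comment: two registers are swapped under a MASK*_0/MASK*_1 pair (the 32-,
# comment: 16- and 8-bit levels are visible in this excerpt).  Illustrative
# comment: C sketch of one level (names are ours):
# comment:
# comment:   lo = (x & m0) | (y << s);   /* low blocks of x, low of y up  */
# comment:   hi = (x >> s) | (y & m1);   /* high of x down, high blocks y */
# comment:   x = lo; y = hi;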
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x128_sp_asm CRYPTO_NAMESPACE(transpose_64x128_sp_asm) #define _transpose_64x128_sp_asm _CRYPTO_NAMESPACE(transpose_64x128_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 x0 # qhasm: reg128 x1 # qhasm: reg128 x2 # qhasm: reg128 x3 # qhasm: reg128 x4 # qhasm: reg128 x5 # qhasm: reg128 x6 # qhasm: reg128 x7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x128_sp_asm .p2align 5 .global _transpose_64x128_sp_asm .global transpose_64x128_sp_asm _transpose_64x128_sp_asm: transpose_64x128_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: x0 = mem128[ input_0 + 0 ] # asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6 movdqu 0( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 128 ] # asm 1: movdqu 128(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 128(<input_0=%rdi),>x1=%xmm7 movdqu 128( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 256 ] # asm 1: movdqu 
256(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 256(<input_0=%rdi),>x2=%xmm8 movdqu 256( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 384 ] # asm 1: movdqu 384(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 384(<input_0=%rdi),>x3=%xmm9 movdqu 384( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 512 ] # asm 1: movdqu 512(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 512(<input_0=%rdi),>x4=%xmm10 movdqu 512( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 640(<input_0=%rdi),>x5=%xmm11 movdqu 640( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 768(<input_0=%rdi),>x6=%xmm12 movdqu 768( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 896(<input_0=%rdi),>x7=%xmm13 movdqu 896( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % 
xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw 
$8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 0 ] = x0 # asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi) movdqu % xmm9, 0( % rdi) # qhasm: mem128[ input_0 + 128 ] = x1 # asm 1: movdqu <x1=reg128#14,128(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,128(<input_0=%rdi) movdqu % xmm13, 128( % rdi) # qhasm: mem128[ input_0 + 256 ] = x2 # asm 1: movdqu <x2=reg128#15,256(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,256(<input_0=%rdi) movdqu % xmm14, 256( % rdi) # qhasm: mem128[ input_0 + 384 ] = x3 # asm 1: movdqu <x3=reg128#11,384(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,384(<input_0=%rdi) movdqu % xmm10, 384( % rdi) # qhasm: mem128[ input_0 + 512 ] = x4 # asm 1: movdqu <x4=reg128#12,512(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,512(<input_0=%rdi) movdqu % xmm11, 512( % rdi) # qhasm: mem128[ input_0 + 640 ] = x5 # asm 1: movdqu <x5=reg128#9,640(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,640(<input_0=%rdi) movdqu % xmm8, 640( % rdi) # qhasm: mem128[ input_0 + 768 ] = x6 # asm 1: movdqu <x6=reg128#13,768(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,768(<input_0=%rdi) movdqu % xmm12, 768( % rdi) # qhasm: mem128[ input_0 + 896 ] = x7 # asm 1: movdqu <x7=reg128#7,896(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,896(<input_0=%rdi) movdqu % xmm6, 896( % rdi) # qhasm: x0 = mem128[ input_0 + 16 ] # asm 1: movdqu 16(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 16(<input_0=%rdi),>x0=%xmm6 movdqu 16( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 144 ] # asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7 movdqu 144( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 272 ] # asm 1: movdqu 272(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 272(<input_0=%rdi),>x2=%xmm8 movdqu 272( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 400 ] # asm 1: movdqu 400(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 
400(<input_0=%rdi),>x3=%xmm9 movdqu 400( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 528 ] # asm 1: movdqu 528(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 528(<input_0=%rdi),>x4=%xmm10 movdqu 528( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 656 ] # asm 1: movdqu 656(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 656(<input_0=%rdi),>x5=%xmm11 movdqu 656( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 784 ] # asm 1: movdqu 784(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 784(<input_0=%rdi),>x6=%xmm12 movdqu 784( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 912 ] # asm 1: movdqu 912(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 912(<input_0=%rdi),>x7=%xmm13 movdqu 912( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % 
xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw 
$8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 16 ] = x0 # asm 1: movdqu <x0=reg128#10,16(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,16(<input_0=%rdi) movdqu % xmm9, 16( % rdi) # qhasm: mem128[ input_0 + 144 ] = x1 # asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi) movdqu % xmm13, 144( % rdi) # qhasm: mem128[ input_0 + 272 ] = x2 # asm 1: movdqu <x2=reg128#15,272(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,272(<input_0=%rdi) movdqu % xmm14, 272( % rdi) # qhasm: mem128[ input_0 + 400 ] = x3 # asm 1: movdqu <x3=reg128#11,400(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,400(<input_0=%rdi) movdqu % xmm10, 400( % rdi) # qhasm: mem128[ input_0 + 528 ] = x4 # asm 1: movdqu <x4=reg128#12,528(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,528(<input_0=%rdi) movdqu % xmm11, 528( % rdi) # qhasm: mem128[ input_0 + 656 ] = x5 # asm 1: movdqu <x5=reg128#9,656(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,656(<input_0=%rdi) movdqu % xmm8, 656( % rdi) # qhasm: mem128[ input_0 + 784 ] = x6 # asm 1: movdqu <x6=reg128#13,784(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,784(<input_0=%rdi) movdqu % xmm12, 784( % rdi) # qhasm: mem128[ input_0 + 912 ] = x7 # asm 1: movdqu <x7=reg128#7,912(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,912(<input_0=%rdi) movdqu % xmm6, 912( % rdi) # qhasm: x0 = mem128[ input_0 + 32 ] # asm 1: movdqu 32(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 32(<input_0=%rdi),>x0=%xmm6 movdqu 32( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 160 ] # asm 1: movdqu 160(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 160(<input_0=%rdi),>x1=%xmm7 movdqu 160( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 288 ] # asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8 movdqu 288( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 416 ] # asm 1: movdqu 416(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 416(<input_0=%rdi),>x3=%xmm9 movdqu 416( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 544 ] # asm 1: movdqu 544(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 544(<input_0=%rdi),>x4=%xmm10 movdqu 544( % rdi), % xmm10 # qhasm: x5 = 
mem128[ input_0 + 672 ] # asm 1: movdqu 672(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 672(<input_0=%rdi),>x5=%xmm11 movdqu 672( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 800 ] # asm 1: movdqu 800(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 800(<input_0=%rdi),>x6=%xmm12 movdqu 800( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 928 ] # asm 1: movdqu 928(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 928(<input_0=%rdi),>x7=%xmm13 movdqu 928( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq 
$32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor 
<v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 32 ] = x0
# asm 1: movdqu <x0=reg128#10,32(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,32(<input_0=%rdi)
movdqu % xmm9, 32( % rdi)

# qhasm: mem128[ input_0 + 160 ] = x1
# asm 1: movdqu <x1=reg128#14,160(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,160(<input_0=%rdi)
movdqu % xmm13, 160( % rdi)

# qhasm: mem128[ input_0 + 288 ] = x2
# asm 1: movdqu <x2=reg128#15,288(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,288(<input_0=%rdi)
movdqu % xmm14, 288( % rdi)

# qhasm: mem128[ input_0 + 416 ] = x3
# asm 1: movdqu <x3=reg128#11,416(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,416(<input_0=%rdi)
movdqu % xmm10, 416( % rdi)

# qhasm: mem128[ input_0 + 544 ] = x4
# asm 1: movdqu <x4=reg128#12,544(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,544(<input_0=%rdi)
movdqu % xmm11, 544( % rdi)

# qhasm: mem128[ input_0 + 672 ] = x5
# asm 1: movdqu <x5=reg128#9,672(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,672(<input_0=%rdi)
movdqu % xmm8, 672( % rdi)

# qhasm: mem128[ input_0 + 800 ] = x6
# asm 1: movdqu <x6=reg128#13,800(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,800(<input_0=%rdi)
movdqu % xmm12, 800( % rdi)

# qhasm: mem128[ input_0 + 928 ] = x7
# asm 1: movdqu <x7=reg128#7,928(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,928(<input_0=%rdi)
movdqu % xmm6, 928( % rdi)
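# Note: each 16-byte column below goes through the same three masked
# interleaving rounds: a 32-bit round (mask0/mask1 with vpsllq/vpsrlq $32),
# a 16-bit round (mask2/mask3 with vpslld/vpsrld $16), and an 8-bit round
# (mask4/mask5 with vpsllw/vpsrlw $8). Each round splits a register pair
# into the four masked parts v00, v10, v01, v11 and recombines them with
# vpor -- the usual constant-time butterfly step of a bit-level transpose.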
# qhasm: x0 = mem128[ input_0 + 48 ]
# asm 1: movdqu 48(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 48(<input_0=%rdi),>x0=%xmm6
movdqu 48( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 176 ]
# asm 1: movdqu 176(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 176(<input_0=%rdi),>x1=%xmm7
movdqu 176( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 304 ]
# asm 1: movdqu 304(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 304(<input_0=%rdi),>x2=%xmm8
movdqu 304( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 432 ]
# asm 1: movdqu 432(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 432(<input_0=%rdi),>x3=%xmm9
movdqu 432( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 560 ]
# asm 1: movdqu 560(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 560(<input_0=%rdi),>x4=%xmm10
movdqu 560( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 688 ]
# asm 1: movdqu 688(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 688(<input_0=%rdi),>x5=%xmm11
movdqu 688( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 816 ]
# asm 1: movdqu 816(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 816(<input_0=%rdi),>x6=%xmm12
movdqu 816( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 944 ]
# asm 1: movdqu 944(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 944(<input_0=%rdi),>x7=%xmm13
movdqu 944( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 48 ] = x0
# asm 1: movdqu <x0=reg128#10,48(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,48(<input_0=%rdi)
movdqu % xmm9, 48( % rdi)

# qhasm: mem128[ input_0 + 176 ] = x1
# asm 1: movdqu <x1=reg128#14,176(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,176(<input_0=%rdi)
movdqu % xmm13, 176( % rdi)

# qhasm: mem128[ input_0 + 304 ] = x2
# asm 1: movdqu <x2=reg128#15,304(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,304(<input_0=%rdi)
movdqu % xmm14, 304( % rdi)

# qhasm: mem128[ input_0 + 432 ] = x3
# asm 1: movdqu <x3=reg128#11,432(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,432(<input_0=%rdi)
movdqu % xmm10, 432( % rdi)

# qhasm: mem128[ input_0 + 560 ] = x4
# asm 1: movdqu <x4=reg128#12,560(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,560(<input_0=%rdi)
movdqu % xmm11, 560( % rdi)

# qhasm: mem128[ input_0 + 688 ] = x5
# asm 1: movdqu <x5=reg128#9,688(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,688(<input_0=%rdi)
movdqu % xmm8, 688( % rdi)

# qhasm: mem128[ input_0 + 816 ] = x6
# asm 1: movdqu <x6=reg128#13,816(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,816(<input_0=%rdi)
movdqu % xmm12, 816( % rdi)

# qhasm: mem128[ input_0 + 944 ] = x7
# asm 1: movdqu <x7=reg128#7,944(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,944(<input_0=%rdi)
movdqu % xmm6, 944( % rdi)
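# Same three interleaving rounds, now on the 16-byte column at offset 64
# (row stride 128 bytes).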
# qhasm: x0 = mem128[ input_0 + 64 ]
# asm 1: movdqu 64(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 64(<input_0=%rdi),>x0=%xmm6
movdqu 64( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 192 ]
# asm 1: movdqu 192(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 192(<input_0=%rdi),>x1=%xmm7
movdqu 192( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 320 ]
# asm 1: movdqu 320(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 320(<input_0=%rdi),>x2=%xmm8
movdqu 320( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 448 ]
# asm 1: movdqu 448(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 448(<input_0=%rdi),>x3=%xmm9
movdqu 448( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 576 ]
# asm 1: movdqu 576(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 576(<input_0=%rdi),>x4=%xmm10
movdqu 576( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 704 ]
# asm 1: movdqu 704(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 704(<input_0=%rdi),>x5=%xmm11
movdqu 704( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 832 ]
# asm 1: movdqu 832(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 832(<input_0=%rdi),>x6=%xmm12
movdqu 832( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 960 ]
# asm 1: movdqu 960(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 960(<input_0=%rdi),>x7=%xmm13
movdqu 960( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 64 ] = x0
# asm 1: movdqu <x0=reg128#10,64(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,64(<input_0=%rdi)
movdqu % xmm9, 64( % rdi)

# qhasm: mem128[ input_0 + 192 ] = x1
# asm 1: movdqu <x1=reg128#14,192(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,192(<input_0=%rdi)
movdqu % xmm13, 192( % rdi)

# qhasm: mem128[ input_0 + 320 ] = x2
# asm 1: movdqu <x2=reg128#15,320(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,320(<input_0=%rdi)
movdqu % xmm14, 320( % rdi)

# qhasm: mem128[ input_0 + 448 ] = x3
# asm 1: movdqu <x3=reg128#11,448(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,448(<input_0=%rdi)
movdqu % xmm10, 448( % rdi)

# qhasm: mem128[ input_0 + 576 ] = x4
# asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi)
movdqu % xmm11, 576( % rdi)

# qhasm: mem128[ input_0 + 704 ] = x5
# asm 1: movdqu <x5=reg128#9,704(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,704(<input_0=%rdi)
movdqu % xmm8, 704( % rdi)

# qhasm: mem128[ input_0 + 832 ] = x6
# asm 1: movdqu <x6=reg128#13,832(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,832(<input_0=%rdi)
movdqu % xmm12, 832( % rdi)

# qhasm: mem128[ input_0 + 960 ] = x7
# asm 1: movdqu <x7=reg128#7,960(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,960(<input_0=%rdi)
movdqu % xmm6, 960( % rdi)
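# Same three interleaving rounds, now on the 16-byte column at offset 80.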
# qhasm: x0 = mem128[ input_0 + 80 ]
# asm 1: movdqu 80(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 80(<input_0=%rdi),>x0=%xmm6
movdqu 80( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 208 ]
# asm 1: movdqu 208(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 208(<input_0=%rdi),>x1=%xmm7
movdqu 208( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 336 ]
# asm 1: movdqu 336(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 336(<input_0=%rdi),>x2=%xmm8
movdqu 336( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 464 ]
# asm 1: movdqu 464(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 464(<input_0=%rdi),>x3=%xmm9
movdqu 464( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 592 ]
# asm 1: movdqu 592(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 592(<input_0=%rdi),>x4=%xmm10
movdqu 592( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 720 ]
# asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11
movdqu 720( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 848 ]
# asm 1: movdqu 848(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 848(<input_0=%rdi),>x6=%xmm12
movdqu 848( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 976 ]
# asm 1: movdqu 976(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 976(<input_0=%rdi),>x7=%xmm13
movdqu 976( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 80 ] = x0
# asm 1: movdqu <x0=reg128#10,80(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,80(<input_0=%rdi)
movdqu % xmm9, 80( % rdi)

# qhasm: mem128[ input_0 + 208 ] = x1
# asm 1: movdqu <x1=reg128#14,208(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,208(<input_0=%rdi)
movdqu % xmm13, 208( % rdi)

# qhasm: mem128[ input_0 + 336 ] = x2
# asm 1: movdqu <x2=reg128#15,336(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,336(<input_0=%rdi)
movdqu % xmm14, 336( % rdi)

# qhasm: mem128[ input_0 + 464 ] = x3
# asm 1: movdqu <x3=reg128#11,464(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,464(<input_0=%rdi)
movdqu % xmm10, 464( % rdi)

# qhasm: mem128[ input_0 + 592 ] = x4
# asm 1: movdqu <x4=reg128#12,592(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,592(<input_0=%rdi)
movdqu % xmm11, 592( % rdi)

# qhasm: mem128[ input_0 + 720 ] = x5
# asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi)
movdqu % xmm8, 720( % rdi)

# qhasm: mem128[ input_0 + 848 ] = x6
# asm 1: movdqu <x6=reg128#13,848(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,848(<input_0=%rdi)
movdqu % xmm12, 848( % rdi)

# qhasm: mem128[ input_0 + 976 ] = x7
# asm 1: movdqu <x7=reg128#7,976(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,976(<input_0=%rdi)
movdqu % xmm6, 976( % rdi)
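# Same three interleaving rounds, now on the 16-byte column at offset 96.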
# qhasm: x0 = mem128[ input_0 + 96 ]
# asm 1: movdqu 96(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 96(<input_0=%rdi),>x0=%xmm6
movdqu 96( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 224 ]
# asm 1: movdqu 224(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 224(<input_0=%rdi),>x1=%xmm7
movdqu 224( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 352 ]
# asm 1: movdqu 352(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 352(<input_0=%rdi),>x2=%xmm8
movdqu 352( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 480 ]
# asm 1: movdqu 480(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 480(<input_0=%rdi),>x3=%xmm9
movdqu 480( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 608 ]
# asm 1: movdqu 608(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 608(<input_0=%rdi),>x4=%xmm10
movdqu 608( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 736 ]
# asm 1: movdqu 736(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 736(<input_0=%rdi),>x5=%xmm11
movdqu 736( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 864 ]
# asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12
movdqu 864( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 992 ]
# asm 1: movdqu 992(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 992(<input_0=%rdi),>x7=%xmm13
movdqu 992( % rdi), % xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor % xmm15, % xmm13, % xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor % xmm15, % xmm8, % xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6

# qhasm: mem128[ input_0 + 96 ] = x0
# asm 1: movdqu <x0=reg128#10,96(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,96(<input_0=%rdi)
movdqu % xmm9, 96( % rdi)

# qhasm: mem128[ input_0 + 224 ] = x1
# asm 1: movdqu <x1=reg128#14,224(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,224(<input_0=%rdi)
movdqu % xmm13, 224( % rdi)

# qhasm: mem128[ input_0 + 352 ] = x2
# asm 1: movdqu <x2=reg128#15,352(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,352(<input_0=%rdi)
movdqu % xmm14, 352( % rdi)

# qhasm: mem128[ input_0 + 480 ] = x3
# asm 1: movdqu <x3=reg128#11,480(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,480(<input_0=%rdi)
movdqu % xmm10, 480( % rdi)

# qhasm: mem128[ input_0 + 608 ] = x4
# asm 1: movdqu <x4=reg128#12,608(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,608(<input_0=%rdi)
movdqu % xmm11, 608( % rdi)

# qhasm: mem128[ input_0 + 736 ] = x5
# asm 1: movdqu <x5=reg128#9,736(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,736(<input_0=%rdi)
movdqu % xmm8, 736( % rdi)

# qhasm: mem128[ input_0 + 864 ] = x6
# asm 1: movdqu <x6=reg128#13,864(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi)
movdqu % xmm12, 864( % rdi)

# qhasm: mem128[ input_0 + 992 ] = x7
# asm 1: movdqu <x7=reg128#7,992(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,992(<input_0=%rdi)
movdqu % xmm6, 992( % rdi)
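# Final column (offset 112). This pass appears to be the last user of the
# mask registers, so the generated code reuses %xmm0-%xmm5 (mask0..mask5)
# as scratch once the corresponding mask is no longer needed.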
<mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#1 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm0 vpand % xmm0, % xmm9, % xmm0 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#13 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm12 vpsllq $32, % xmm13, % xmm12 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#1,>x3=reg128#1 # asm 2: vpor <v10=%xmm12,<v00=%xmm0,>x3=%xmm0 vpor % xmm12, % xmm0, % xmm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#13 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm12 vpslld $16, % xmm11, % xmm12 # qhasm: 4x v01 = 
x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#14 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm13 vpsrld $16, % xmm14, % xmm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#1,>v10=reg128#14 # asm 2: vpslld $16,<x3=%xmm0,>v10=%xmm13 vpslld $16, % xmm0, % xmm13 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#14 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm13 vpslld $16, % xmm8, % xmm13 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#3 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm2 vpand % xmm2, % xmm7, % xmm2 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#2,>v10=reg128#9 # asm 2: vpslld $16,<x7=%xmm1,>v10=%xmm8 vpslld $16, % xmm1, % xmm8 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#9,<v00=reg128#3,>x5=reg128#3 # asm 2: vpor <v10=%xmm8,<v00=%xmm2,>x5=%xmm2 vpor % xmm8, % xmm2, % xmm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4 # asm 2: vpand 
<mask4=%xmm4,<x0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#13,>v10=reg128#8 # asm 2: vpsllw $8,<x1=%xmm12,>v10=%xmm7 vpsllw $8, % xmm12, % xmm7 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#10,>v01=reg128#9 # asm 2: vpsrlw $8,<x0=%xmm9,>v01=%xmm8 vpsrlw $8, % xmm9, % xmm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#1,>v10=reg128#10 # asm 2: vpsllw $8,<x3=%xmm0,>v10=%xmm9 vpsllw $8, % xmm0, % xmm9 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#3,>v10=reg128#12 # asm 2: vpsllw $8,<x5=%xmm2,>v10=%xmm11 vpsllw $8, % xmm2, % xmm11 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#11,>v01=reg128#11 # asm 2: vpsrlw $8,<x4=%xmm10,>v01=%xmm10 vpsrlw $8, % xmm10, % xmm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#5 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm4 vpand % xmm4, % xmm6, % xmm4 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#2,>v10=reg128#11 # asm 2: vpsllw $8,<x7=%xmm1,>v10=%xmm10 vpsllw $8, % xmm1, % xmm10 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#11,<v00=reg128#5,>x6=reg128#5 # asm 2: vpor <v10=%xmm10,<v00=%xmm4,>x6=%xmm4 vpor % xmm10, % xmm4, % xmm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2 # asm 2: vpor 
<v11=%xmm1,<v01=%xmm6,>x7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: mem128[ input_0 + 112 ] = x0 # asm 1: movdqu <x0=reg128#4,112(<input_0=int64#1) # asm 2: movdqu <x0=%xmm3,112(<input_0=%rdi) movdqu % xmm3, 112( % rdi) # qhasm: mem128[ input_0 + 240 ] = x1 # asm 1: movdqu <x1=reg128#8,240(<input_0=int64#1) # asm 2: movdqu <x1=%xmm7,240(<input_0=%rdi) movdqu % xmm7, 240( % rdi) # qhasm: mem128[ input_0 + 368 ] = x2 # asm 1: movdqu <x2=reg128#9,368(<input_0=int64#1) # asm 2: movdqu <x2=%xmm8,368(<input_0=%rdi) movdqu % xmm8, 368( % rdi) # qhasm: mem128[ input_0 + 496 ] = x3 # asm 1: movdqu <x3=reg128#1,496(<input_0=int64#1) # asm 2: movdqu <x3=%xmm0,496(<input_0=%rdi) movdqu % xmm0, 496( % rdi) # qhasm: mem128[ input_0 + 624 ] = x4 # asm 1: movdqu <x4=reg128#10,624(<input_0=int64#1) # asm 2: movdqu <x4=%xmm9,624(<input_0=%rdi) movdqu % xmm9, 624( % rdi) # qhasm: mem128[ input_0 + 752 ] = x5 # asm 1: movdqu <x5=reg128#3,752(<input_0=int64#1) # asm 2: movdqu <x5=%xmm2,752(<input_0=%rdi) movdqu % xmm2, 752( % rdi) # qhasm: mem128[ input_0 + 880 ] = x6 # asm 1: movdqu <x6=reg128#5,880(<input_0=int64#1) # asm 2: movdqu <x6=%xmm4,880(<input_0=%rdi) movdqu % xmm4, 880( % rdi) # qhasm: mem128[ input_0 + 1008 ] = x7 # asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1) # asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi) movdqu % xmm1, 1008( % rdi) # qhasm: mask0 aligned= mem128[ MASK2_0 ] # asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0 movdqa MASK2_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK2_1 ] # asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1 movdqa MASK2_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK1_0 ] # asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2 movdqa MASK1_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK1_1 ] # asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3 movdqa MASK1_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK0_0 ] # asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4 movdqa MASK0_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK0_1 ] # asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5 movdqa MASK0_1( % rip), % xmm5 # qhasm: x0 = mem128[ input_0 + 0 ] # asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6 movdqu 0( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 16 ] # asm 1: movdqu 16(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 16(<input_0=%rdi),>x1=%xmm7 movdqu 16( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 32 ] # asm 1: movdqu 32(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 32(<input_0=%rdi),>x2=%xmm8 movdqu 32( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 48 ] # asm 1: movdqu 48(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 48(<input_0=%rdi),>x3=%xmm9 movdqu 48( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 64 ] # asm 1: movdqu 64(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 64(<input_0=%rdi),>x4=%xmm10 movdqu 64( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 80 ] # asm 1: movdqu 80(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 80(<input_0=%rdi),>x5=%xmm11 movdqu 80( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 96 ] # asm 1: movdqu 96(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 96(<input_0=%rdi),>x6=%xmm12 movdqu 96( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 112 ] # asm 1: movdqu 112(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 
112(<input_0=%rdi),>x7=%xmm13 movdqu 112( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % 
xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 
vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: 
vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 0 ] = x0 # asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi) movdqu % xmm9, 0( % rdi) # qhasm: mem128[ input_0 + 16 ] = x1 # asm 1: movdqu <x1=reg128#14,16(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,16(<input_0=%rdi) movdqu % xmm13, 16( % rdi) # qhasm: mem128[ input_0 + 32 ] = x2 # asm 1: movdqu <x2=reg128#15,32(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,32(<input_0=%rdi) movdqu % xmm14, 32( % rdi) # qhasm: mem128[ input_0 + 48 ] = x3 # asm 1: movdqu <x3=reg128#11,48(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,48(<input_0=%rdi) movdqu % xmm10, 48( % rdi) # qhasm: mem128[ input_0 + 64 ] = x4 # asm 1: movdqu <x4=reg128#12,64(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,64(<input_0=%rdi) movdqu % xmm11, 64( % rdi) # qhasm: mem128[ input_0 + 80 ] = x5 # asm 1: movdqu <x5=reg128#9,80(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,80(<input_0=%rdi) movdqu % xmm8, 80( % rdi) # qhasm: mem128[ input_0 + 96 ] = x6 # asm 1: movdqu <x6=reg128#13,96(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,96(<input_0=%rdi) movdqu % xmm12, 96( % rdi) # qhasm: mem128[ input_0 + 112 ] = x7 # asm 1: movdqu <x7=reg128#7,112(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,112(<input_0=%rdi) movdqu % xmm6, 112( % rdi) # qhasm: x0 = mem128[ input_0 + 128 ] # asm 1: movdqu 128(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 128(<input_0=%rdi),>x0=%xmm6 movdqu 128( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 144 ] # asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7 movdqu 144( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 160 ] # asm 1: movdqu 160(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 160(<input_0=%rdi),>x2=%xmm8 movdqu 160( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 176 ] # asm 1: movdqu 176(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 176(<input_0=%rdi),>x3=%xmm9 movdqu 176( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 192 ] # asm 1: movdqu 192(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 192(<input_0=%rdi),>x4=%xmm10 movdqu 192( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 208 ] # asm 1: movdqu 208(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 208(<input_0=%rdi),>x5=%xmm11 movdqu 208( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 224 ] # asm 1: movdqu 224(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 224(<input_0=%rdi),>x6=%xmm12 movdqu 224( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 240 ] # asm 1: movdqu 240(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 240(<input_0=%rdi),>x7=%xmm13 movdqu 240( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, 
% xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand 
<mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand 
<mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & 
mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 128 ] = x0 # asm 1: movdqu <x0=reg128#10,128(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,128(<input_0=%rdi) movdqu % xmm9, 128( % rdi) # qhasm: mem128[ input_0 + 144 ] = x1 # asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi) movdqu % xmm13, 144( % rdi) # qhasm: mem128[ input_0 + 160 ] = x2 # asm 1: movdqu <x2=reg128#15,160(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,160(<input_0=%rdi) movdqu % xmm14, 160( % rdi) # qhasm: mem128[ input_0 + 176 ] = x3 # asm 1: movdqu <x3=reg128#11,176(<input_0=int64#1) # asm 2: movdqu 
<x3=%xmm10,176(<input_0=%rdi) movdqu % xmm10, 176( % rdi) # qhasm: mem128[ input_0 + 192 ] = x4 # asm 1: movdqu <x4=reg128#12,192(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,192(<input_0=%rdi) movdqu % xmm11, 192( % rdi) # qhasm: mem128[ input_0 + 208 ] = x5 # asm 1: movdqu <x5=reg128#9,208(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,208(<input_0=%rdi) movdqu % xmm8, 208( % rdi) # qhasm: mem128[ input_0 + 224 ] = x6 # asm 1: movdqu <x6=reg128#13,224(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,224(<input_0=%rdi) movdqu % xmm12, 224( % rdi) # qhasm: mem128[ input_0 + 240 ] = x7 # asm 1: movdqu <x7=reg128#7,240(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,240(<input_0=%rdi) movdqu % xmm6, 240( % rdi) # qhasm: x0 = mem128[ input_0 + 256 ] # asm 1: movdqu 256(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 256(<input_0=%rdi),>x0=%xmm6 movdqu 256( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 272 ] # asm 1: movdqu 272(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 272(<input_0=%rdi),>x1=%xmm7 movdqu 272( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 288 ] # asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8 movdqu 288( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 304 ] # asm 1: movdqu 304(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 304(<input_0=%rdi),>x3=%xmm9 movdqu 304( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 320 ] # asm 1: movdqu 320(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 320(<input_0=%rdi),>x4=%xmm10 movdqu 320( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 336 ] # asm 1: movdqu 336(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 336(<input_0=%rdi),>x5=%xmm11 movdqu 336( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 352 ] # asm 1: movdqu 352(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 352(<input_0=%rdi),>x6=%xmm12 movdqu 352( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 368 ] # asm 1: movdqu 368(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 368(<input_0=%rdi),>x7=%xmm13 movdqu 368( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 
4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % 
xmm11, %xmm15

# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 256 ] = x0
movdqu %xmm9, 256(%rdi)
# qhasm: mem128[ input_0 + 272 ] = x1
movdqu %xmm13, 272(%rdi)
# qhasm: mem128[ input_0 + 288 ] = x2
movdqu %xmm14, 288(%rdi)
# qhasm: mem128[ input_0 + 304 ] = x3
movdqu %xmm10, 304(%rdi)
# qhasm: mem128[ input_0 + 320 ] = x4
movdqu %xmm11, 320(%rdi)
# qhasm: mem128[ input_0 + 336 ] = x5
movdqu %xmm8, 336(%rdi)
# qhasm: mem128[ input_0 + 352 ] = x6
movdqu %xmm12, 352(%rdi)
# qhasm: mem128[ input_0 + 368 ] = x7
movdqu %xmm6, 368(%rdi)
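
# The same three-stage butterfly network is now applied to the next
# 128-byte block (bytes 384..511). In each stage a pair of registers
# exchanges bit groups: mask0/mask1 swap 4-bit groups between x_j and
# x_(j+4) (psllq/psrlq $4), mask2/mask3 swap 2-bit groups between x_j
# and x_(j+2) ($2), and mask4/mask5 swap single bits between x_j and
# x_(j+1) ($1) -- in effect a bit-level transpose within each group of
# eight 128-bit words.
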
# qhasm: x0 = mem128[ input_0 + 384 ]
movdqu 384(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 400 ]
movdqu 400(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 416 ]
movdqu 416(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 432 ]
movdqu 432(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 448 ]
movdqu 448(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 464 ]
movdqu 464(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 480 ]
movdqu 480(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 496 ]
movdqu 496(%rdi), %xmm13

# qhasm: v00 = x0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = x0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 384 ] = x0
movdqu %xmm9, 384(%rdi)
# qhasm: mem128[ input_0 + 400 ] = x1
movdqu %xmm13, 400(%rdi)
# qhasm: mem128[ input_0 + 416 ] = x2
movdqu %xmm14, 416(%rdi)
# qhasm: mem128[ input_0 + 432 ] = x3
movdqu %xmm10, 432(%rdi)
# qhasm: mem128[ input_0 + 448 ] = x4
movdqu %xmm11, 448(%rdi)
# qhasm: mem128[ input_0 + 464 ] = x5
movdqu %xmm8, 464(%rdi)
# qhasm: mem128[ input_0 + 480 ] = x6
movdqu %xmm12, 480(%rdi)
# qhasm: mem128[ input_0 + 496 ] = x7
movdqu %xmm6, 496(%rdi)
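
# Next 128-byte block (bytes 512..639): same butterfly network.
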
# qhasm: x0 = mem128[ input_0 + 512 ]
movdqu 512(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 528 ]
movdqu 528(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 544 ]
movdqu 544(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 560 ]
movdqu 560(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 576 ]
movdqu 576(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 592 ]
movdqu 592(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 608 ]
movdqu 608(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 624 ]
movdqu 624(%rdi), %xmm13

# qhasm: v00 = x0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = x0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 512 ] = x0
movdqu %xmm9, 512(%rdi)
# qhasm: mem128[ input_0 + 528 ] = x1
movdqu %xmm13, 528(%rdi)
# qhasm: mem128[ input_0 + 544 ] = x2
movdqu %xmm14, 544(%rdi)
# qhasm: mem128[ input_0 + 560 ] = x3
movdqu %xmm10, 560(%rdi)
# qhasm: mem128[ input_0 + 576 ] = x4
movdqu %xmm11, 576(%rdi)
# qhasm: mem128[ input_0 + 592 ] = x5
movdqu %xmm8, 592(%rdi)
# qhasm: mem128[ input_0 + 608 ] = x6
movdqu %xmm12, 608(%rdi)
# qhasm: mem128[ input_0 + 624 ] = x7
movdqu %xmm6, 624(%rdi)
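
# Next 128-byte block (bytes 640..767): same butterfly network.
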
# qhasm: x0 = mem128[ input_0 + 640 ]
movdqu 640(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 656 ]
movdqu 656(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 672 ]
movdqu 672(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 688 ]
movdqu 688(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 704 ]
movdqu 704(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 720 ]
movdqu 720(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 736 ]
movdqu 736(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 752 ]
movdqu 752(%rdi), %xmm13

# qhasm: v00 = x0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = x0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 640 ] = x0
movdqu %xmm9, 640(%rdi)
# qhasm: mem128[ input_0 + 656 ] = x1
movdqu %xmm13, 656(%rdi)
# qhasm: mem128[ input_0 + 672 ] = x2
movdqu %xmm14, 672(%rdi)
# qhasm: mem128[ input_0 + 688 ] = x3
movdqu %xmm10, 688(%rdi)
# qhasm: mem128[ input_0 + 704 ] = x4
movdqu %xmm11, 704(%rdi)
# qhasm: mem128[ input_0 + 720 ] = x5
movdqu %xmm8, 720(%rdi)
# qhasm: mem128[ input_0 + 736 ] = x6
movdqu %xmm12, 736(%rdi)
# qhasm: mem128[ input_0 + 752 ] = x7
movdqu %xmm6, 752(%rdi)
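
# Next 128-byte block (bytes 768..895): same butterfly network.
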
# qhasm: x0 = mem128[ input_0 + 768 ]
movdqu 768(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 784 ]
movdqu 784(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 800 ]
movdqu 800(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 816 ]
movdqu 816(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 832 ]
movdqu 832(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 848 ]
movdqu 848(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 864 ]
movdqu 864(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 880 ]
movdqu 880(%rdi), %xmm13

# qhasm: v00 = x0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = x0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3
= v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 768 ] = x0 # asm 1: movdqu <x0=reg128#10,768(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,768(<input_0=%rdi) movdqu % xmm9, 768( % rdi) # qhasm: mem128[ input_0 + 784 ] = x1 # asm 1: movdqu <x1=reg128#14,784(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,784(<input_0=%rdi) movdqu % xmm13, 784( % rdi) # qhasm: mem128[ input_0 + 800 ] = x2 # asm 1: movdqu <x2=reg128#15,800(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,800(<input_0=%rdi) movdqu % xmm14, 800( % rdi) # qhasm: mem128[ input_0 + 816 ] = x3 # asm 1: movdqu <x3=reg128#11,816(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,816(<input_0=%rdi) movdqu % xmm10, 816( % rdi) # qhasm: mem128[ input_0 + 832 ] = x4 # asm 1: movdqu <x4=reg128#12,832(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,832(<input_0=%rdi) movdqu % xmm11, 832( % rdi) # qhasm: mem128[ input_0 + 848 ] = x5 # asm 1: movdqu <x5=reg128#9,848(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,848(<input_0=%rdi) movdqu % xmm8, 848( % rdi) # qhasm: mem128[ input_0 + 864 ] = x6 # asm 1: 
movdqu <x6=reg128#13,864(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi) movdqu % xmm12, 864( % rdi) # qhasm: mem128[ input_0 + 880 ] = x7 # asm 1: movdqu <x7=reg128#7,880(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,880(<input_0=%rdi) movdqu % xmm6, 880( % rdi) # qhasm: x0 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 896(<input_0=%rdi),>x0=%xmm6 movdqu 896( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 912 ] # asm 1: movdqu 912(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 912(<input_0=%rdi),>x1=%xmm7 movdqu 912( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 928 ] # asm 1: movdqu 928(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 928(<input_0=%rdi),>x2=%xmm8 movdqu 928( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 944 ] # asm 1: movdqu 944(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 944(<input_0=%rdi),>x3=%xmm9 movdqu 944( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 960 ] # asm 1: movdqu 960(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 960(<input_0=%rdi),>x4=%xmm10 movdqu 960( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 976 ] # asm 1: movdqu 976(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 976(<input_0=%rdi),>x5=%xmm11 movdqu 976( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 992 ] # asm 1: movdqu 992(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 992(<input_0=%rdi),>x6=%xmm12 movdqu 992( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 1008 ] # asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13 movdqu 1008( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x 
v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>x3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>x3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % 
xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<x3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<x7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % 
xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>x5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>x5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<x0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<x1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<x0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<x3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<x5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<x4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<x7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>x6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>x6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>x7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: mem128[ input_0 + 896 ] = x0 # asm 1: movdqu <x0=reg128#4,896(<input_0=int64#1) # asm 2: movdqu <x0=%xmm3,896(<input_0=%rdi) movdqu % xmm3, 896( % rdi) # qhasm: mem128[ input_0 + 912 ] = x1 # asm 1: movdqu <x1=reg128#8,912(<input_0=int64#1) # asm 2: movdqu <x1=%xmm7,912(<input_0=%rdi) movdqu % xmm7, 912( % rdi) # qhasm: mem128[ input_0 + 928 ] = x2 # asm 1: movdqu <x2=reg128#9,928(<input_0=int64#1) # asm 2: movdqu <x2=%xmm8,928(<input_0=%rdi) movdqu % xmm8, 928( % rdi) # qhasm: mem128[ input_0 + 944 ] = x3 # asm 1: movdqu <x3=reg128#1,944(<input_0=int64#1) # asm 2: movdqu <x3=%xmm0,944(<input_0=%rdi) movdqu % xmm0, 944( % rdi) # qhasm: mem128[ input_0 + 960 ] = x4 # asm 1: movdqu <x4=reg128#10,960(<input_0=int64#1) # asm 2: movdqu <x4=%xmm9,960(<input_0=%rdi) movdqu % xmm9, 960( % rdi) # qhasm: mem128[ input_0 + 976 ] = x5 # asm 1: movdqu <x5=reg128#3,976(<input_0=int64#1) # asm 2: movdqu <x5=%xmm2,976(<input_0=%rdi) movdqu % xmm2, 976( % rdi) # qhasm: mem128[ input_0 + 992 ] = x6 # asm 1: movdqu <x6=reg128#5,992(<input_0=int64#1) # asm 2: movdqu <x6=%xmm4,992(<input_0=%rdi) movdqu % xmm4, 992( % rdi) # qhasm: mem128[ input_0 + 1008 ] = x7 # asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1) # asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi) movdqu % xmm1, 1008( % rdi) # qhasm: return add % r11, % rsp ret
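# ---------------------------------------------------------------------
# Annotation (added commentary; not part of the qhasm-generated output).
# The blocks above repeat one masked "butterfly" exchange on pairs of
# 128-bit words at shift distances 4, 2 and 1 (the psllq/psrlq amounts
# visible above), using complementary mask pairs held in mask0..mask5
# (e.g. 0x0F0F.../0xF0F0... for the distance-4 step; the constants are
# loaded earlier and are an assumption here). A minimal C sketch of one
# step, with illustrative names (lo, hi, m, s) not taken from this file:
#
#     /* m selects the low half-pattern; m << s the high one */
#     uint64_t t_lo = (lo & m)        | ((hi & m) << s);
#     uint64_t t_hi = ((lo & (m << s)) >> s) | (hi & (m << s));
#     lo = t_lo; hi = t_hi;
#
# Applying this at s = 4, then 2, then 1 interleaves the bits of the
# register pairs, which is how these routines build a bit-level
# transpose out of vpand/psllq/psrlq/vpor.
# ---------------------------------------------------------------------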
mktmansour/MKT-KSA-Geolocation-Security
69,549
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128f/avx2/vec256_mul_asm.S
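# ---------------------------------------------------------------------
# Annotation (added commentary; not part of the qhasm-generated output).
# vec256_mul_asm below multiplies bitsliced field elements: each operand
# is 13 ymm "limbs" (offsets 0..384 in steps of 32; bit i of every
# element lives in limb i), combined with vpand as the GF(2) multiply
# and vpxor as the add, schoolbook style. The folds of r24 into r15,
# r14, r12, r11 visible below correspond to the reduction
# x^13 = x^4 + x^3 + x + 1. A hedged C reference sketch; the names
# vec_mul_ref and prod are illustrative and do not come from this file:
#
#     void vec_mul_ref(uint64_t r[13],
#                      const uint64_t a[13], const uint64_t b[13]) {
#         uint64_t prod[25] = {0};
#         for (int i = 0; i < 13; i++)          /* schoolbook multiply */
#             for (int j = 0; j < 13; j++)
#                 prod[i + j] ^= a[i] & b[j];   /* AND = mul, XOR = add */
#         for (int i = 24; i >= 13; i--) {      /* fold high terms down */
#             prod[i - 9]  ^= prod[i];          /* x^13 -> x^4 */
#             prod[i - 10] ^= prod[i];          /* x^13 -> x^3 */
#             prod[i - 12] ^= prod[i];          /* x^13 -> x^1 */
#             prod[i - 13] ^= prod[i];          /* x^13 -> x^0 */
#         }
#         for (int i = 0; i < 13; i++) r[i] = prod[i];
#     }
#
# The assembly interleaves these folds with the accumulation (each
# r_{24-k} is complete after the a_{12-k} pass, so it is folded right
# away) instead of doing a separate reduction pass; the algebra is the
# same.
# ---------------------------------------------------------------------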
#include "namespace.h" #define vec256_mul_asm CRYPTO_NAMESPACE(vec256_mul_asm) #define _vec256_mul_asm _CRYPTO_NAMESPACE(vec256_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_mul_asm .p2align 5 .global _vec256_mul_asm .global vec256_mul_asm _vec256_mul_asm: vec256_mul_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
# asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a8 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10
vpxor %ymm14,%ymm10,%ymm10

# qhasm: r11 ^= r20
# asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1
vpxor %ymm10,%ymm1,%ymm1

# qhasm: r10 ^= r20
# asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13
vpxor %ymm10,%ymm13,%ymm13

# qhasm: r8 ^= r20
# asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11
vpxor %ymm10,%ymm11,%ymm11

# qhasm: r7 = r20
# asm 1: vmovapd <r20=reg256#11,>r7=reg256#11
# asm 2: vmovapd <r20=%ymm10,>r7=%ymm10
vmovapd %ymm10,%ymm10

# qhasm: a7 = mem256[ input_1 + 224 ]
# asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15
# asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14
vmovupd 224(%rsi),%ymm14

# qhasm: r = a7 & b0
# asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a7 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a7 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a7 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a7 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a7 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a7 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a7 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a7 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a7 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a7 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a7 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a7 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: r10 ^= r19
# asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13
vpxor %ymm9,%ymm13,%ymm13

# qhasm: r9 ^= r19
# asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12
vpxor %ymm9,%ymm12,%ymm12

# qhasm: r7 ^= r19
# asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10
vpxor %ymm9,%ymm10,%ymm10

# qhasm: r6 = r19
# asm 1: vmovapd <r19=reg256#10,>r6=reg256#10
# asm 2: vmovapd <r19=%ymm9,>r6=%ymm9
vmovapd %ymm9,%ymm9

# qhasm: a6 = mem256[ input_1 + 192 ]
# asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15
# asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14
vmovupd 192(%rsi),%ymm14

# qhasm: r = a6 & b0
# asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a6 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a6 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a6 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a6 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a6 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a6 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a6 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a6 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a6 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a6 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a6 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a6 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8
vpxor %ymm14,%ymm8,%ymm8

# qhasm: r9 ^= r18
# asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12
vpxor %ymm8,%ymm12,%ymm12

# qhasm: r8 ^= r18
# asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11
vpxor %ymm8,%ymm11,%ymm11

# qhasm: r6 ^= r18
# asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9
vpxor %ymm8,%ymm9,%ymm9

# qhasm: r5 = r18
# asm 1: vmovapd <r18=reg256#9,>r5=reg256#9
# asm 2: vmovapd <r18=%ymm8,>r5=%ymm8
vmovapd %ymm8,%ymm8
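
# Every block in this routine follows one shape: a 256-bit limb a_i of
# the first operand is loaded from input_1, ANDed with each of the 13
# limbs b_0..b_12 of the second operand (b_0 stays in %ymm0, b_1..b_12
# are read from input_2 + 32..384), and each product is XORed into the
# accumulator r_(i+j).  vpand acts as a carry-less bit product and
# vpxor as addition, so this is bitsliced schoolbook multiplication in
# GF(2)[x], carried out for 256 independent bit positions at once.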

# qhasm: a5 = mem256[ input_1 + 160 ]
# asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15
# asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14
vmovupd 160(%rsi),%ymm14

# qhasm: r = a5 & b0
# asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a5 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a5 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a5 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a5 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a5 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a5 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a5 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a5 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a5 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a5 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a5 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a5 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: r8 ^= r17
# asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11
vpxor %ymm7,%ymm11,%ymm11

# qhasm: r7 ^= r17
# asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10
vpxor %ymm7,%ymm10,%ymm10

# qhasm: r5 ^= r17
# asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8
vpxor %ymm7,%ymm8,%ymm8

# qhasm: r4 = r17
# asm 1: vmovapd <r17=reg256#8,>r4=reg256#8
# asm 2: vmovapd <r17=%ymm7,>r4=%ymm7
vmovapd %ymm7,%ymm7

# qhasm: a4 = mem256[ input_1 + 128 ]
# asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15
# asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14
vmovupd 128(%rsi),%ymm14

# qhasm: r = a4 & b0
# asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a4 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a4 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a4 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a4 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a4 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a4 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a4 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a4 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a4 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a4 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a4 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a4 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6
vpxor %ymm14,%ymm6,%ymm6
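
# The vpxor/vmovapd steps below fold the finished top limb r16 back
# into the low limbs.  The fold offsets used throughout this routine
# (+4, +3, +1, +0) encode x^13 = x^4 + x^3 + x + 1, i.e. reduction
# modulo x^13 + x^4 + x^3 + x + 1, so here
#     x^16 = x^3 * x^13 = x^7 + x^6 + x^4 + x^3,
# and r16 is XORed into r7, r6 and r4 and becomes the new r3.  The
# folds of r20..r17 above follow the same identity.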
# qhasm: r7 ^= r16
# asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10
vpxor %ymm6,%ymm10,%ymm10

# qhasm: r6 ^= r16
# asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9
vpxor %ymm6,%ymm9,%ymm9

# qhasm: r4 ^= r16
# asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7
vpxor %ymm6,%ymm7,%ymm7

# qhasm: r3 = r16
# asm 1: vmovapd <r16=reg256#7,>r3=reg256#7
# asm 2: vmovapd <r16=%ymm6,>r3=%ymm6
vmovapd %ymm6,%ymm6

# qhasm: a3 = mem256[ input_1 + 96 ]
# asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15
# asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14
vmovupd 96(%rsi),%ymm14

# qhasm: r = a3 & b0
# asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a3 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a3 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a3 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a3 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a3 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a3 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a3 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a3 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a3 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a3 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a3 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a3 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: r6 ^= r15
# asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9
vpxor %ymm5,%ymm9,%ymm9

# qhasm: r5 ^= r15
# asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8
vpxor %ymm5,%ymm8,%ymm8

# qhasm: r3 ^= r15
# asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6
vpxor %ymm5,%ymm6,%ymm6

# qhasm: r2 = r15
# asm 1: vmovapd <r15=reg256#6,>r2=reg256#6
# asm 2: vmovapd <r15=%ymm5,>r2=%ymm5
vmovapd %ymm5,%ymm5

# qhasm: a2 = mem256[ input_1 + 64 ]
# asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15
# asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14
vmovupd 64(%rsi),%ymm14

# qhasm: r = a2 & b0
# asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a2 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a2 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a2 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a2 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a2 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a2 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a2 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a2 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a2 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a2 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a2 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a2 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4
vpxor %ymm14,%ymm4,%ymm4

# qhasm: r5 ^= r14
# asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8
vpxor %ymm4,%ymm8,%ymm8

# qhasm: r4 ^= r14
# asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7
vpxor %ymm4,%ymm7,%ymm7

# qhasm: r2 ^= r14
# asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5
vpxor %ymm4,%ymm5,%ymm5

# qhasm: r1 = r14
# asm 1: vmovapd <r14=reg256#5,>r1=reg256#5
# asm 2: vmovapd <r14=%ymm4,>r1=%ymm4
vmovapd %ymm4,%ymm4

# qhasm: a1 = mem256[ input_1 + 32 ]
# asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15
# asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14
vmovupd 32(%rsi),%ymm14

# qhasm: r = a1 & b0
# asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a1 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a1 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a1 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a1 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a1 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a1 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a1 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a1 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a1 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a1 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a1 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a1 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: r4 ^= r13
# asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7
vpxor %ymm3,%ymm7,%ymm7

# qhasm: r3 ^= r13
# asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6
vpxor %ymm3,%ymm6,%ymm6

# qhasm: r1 ^= r13
# asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4
vpxor %ymm3,%ymm4,%ymm4

# qhasm: r0 = r13
# asm 1: vmovapd <r13=reg256#4,>r0=reg256#4
# asm 2: vmovapd <r13=%ymm3,>r0=%ymm3
vmovapd %ymm3,%ymm3

# qhasm: a0 = mem256[ input_1 + 0 ]
# asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15
# asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14
vmovupd 0(%rsi),%ymm14

# qhasm: r = a0 & b0
# asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1
# asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0
vpand %ymm14,%ymm0,%ymm0

# qhasm: r0 ^= r
# asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4
# asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3
vpxor %ymm0,%ymm3,%ymm3

# qhasm: r = a0 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 32(%rdx),%ymm14,%ymm0

# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4
vpxor %ymm0,%ymm4,%ymm4

# qhasm: r = a0 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 64(%rdx),%ymm14,%ymm0

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5
vpxor %ymm0,%ymm5,%ymm5

# qhasm: r = a0 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 96(%rdx),%ymm14,%ymm0

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6
vpxor %ymm0,%ymm6,%ymm6

# qhasm: r = a0 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 128(%rdx),%ymm14,%ymm0

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7
vpxor %ymm0,%ymm7,%ymm7

# qhasm: r = a0 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 160(%rdx),%ymm14,%ymm0

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8
vpxor %ymm0,%ymm8,%ymm8

# qhasm: r = a0 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 192(%rdx),%ymm14,%ymm0

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9
vpxor %ymm0,%ymm9,%ymm9

# qhasm: r = a0 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 224(%rdx),%ymm14,%ymm0

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10
vpxor %ymm0,%ymm10,%ymm10

# qhasm: r = a0 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 256(%rdx),%ymm14,%ymm0

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11
vpxor %ymm0,%ymm11,%ymm11

# qhasm: r = a0 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 288(%rdx),%ymm14,%ymm0

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12
vpxor %ymm0,%ymm12,%ymm12

# qhasm: r = a0 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 320(%rdx),%ymm14,%ymm0

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13
vpxor %ymm0,%ymm13,%ymm13

# qhasm: r = a0 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 352(%rdx),%ymm14,%ymm0

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1
vpxor %ymm0,%ymm1,%ymm1

# qhasm: r = a0 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 384(%rdx),%ymm14,%ymm0

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2
vpxor %ymm0,%ymm2,%ymm2

# qhasm: mem256[ input_0 + 384 ] = r12
# asm 1: vmovupd <r12=reg256#3,384(<input_0=int64#1)
# asm 2: vmovupd <r12=%ymm2,384(<input_0=%rdi)
vmovupd %ymm2,384(%rdi)

# qhasm: mem256[ input_0 + 352 ] = r11
# asm 1: vmovupd <r11=reg256#2,352(<input_0=int64#1)
# asm 2: vmovupd <r11=%ymm1,352(<input_0=%rdi)
vmovupd %ymm1,352(%rdi)

# qhasm: mem256[ input_0 + 320 ] = r10
# asm 1: vmovupd <r10=reg256#14,320(<input_0=int64#1)
# asm 2: vmovupd <r10=%ymm13,320(<input_0=%rdi)
vmovupd %ymm13,320(%rdi)

# qhasm: mem256[ input_0 + 288 ] = r9
# asm 1: vmovupd <r9=reg256#13,288(<input_0=int64#1)
# asm 2: vmovupd <r9=%ymm12,288(<input_0=%rdi)
vmovupd %ymm12,288(%rdi)

# qhasm: mem256[ input_0 + 256 ] = r8
# asm 1: vmovupd <r8=reg256#12,256(<input_0=int64#1)
# asm 2: vmovupd <r8=%ymm11,256(<input_0=%rdi)
vmovupd %ymm11,256(%rdi)

# qhasm: mem256[ input_0 + 224 ] = r7
# asm 1: vmovupd <r7=reg256#11,224(<input_0=int64#1)
# asm 2: vmovupd <r7=%ymm10,224(<input_0=%rdi)
vmovupd %ymm10,224(%rdi)

# qhasm: mem256[ input_0 + 192 ] = r6
# asm 1: vmovupd <r6=reg256#10,192(<input_0=int64#1)
# asm 2: vmovupd <r6=%ymm9,192(<input_0=%rdi)
vmovupd %ymm9,192(%rdi)

# qhasm: mem256[ input_0 + 160 ] = r5
# asm 1: vmovupd <r5=reg256#9,160(<input_0=int64#1)
# asm 2: vmovupd <r5=%ymm8,160(<input_0=%rdi)
vmovupd %ymm8,160(%rdi)

# qhasm: mem256[ input_0 + 128 ] = r4
# asm 1: vmovupd <r4=reg256#8,128(<input_0=int64#1)
# asm 2: vmovupd <r4=%ymm7,128(<input_0=%rdi)
vmovupd %ymm7,128(%rdi)

# qhasm: mem256[ input_0 + 96 ] = r3
# asm 1: vmovupd <r3=reg256#7,96(<input_0=int64#1)
# asm 2: vmovupd <r3=%ymm6,96(<input_0=%rdi)
vmovupd %ymm6,96(%rdi)

# qhasm: mem256[ input_0 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#6,64(<input_0=int64#1)
# asm 2: vmovupd <r2=%ymm5,64(<input_0=%rdi)
vmovupd %ymm5,64(%rdi)

# qhasm: mem256[ input_0 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#5,32(<input_0=int64#1)
# asm 2: vmovupd <r1=%ymm4,32(<input_0=%rdi)
vmovupd %ymm4,32(%rdi)

# qhasm: mem256[ input_0 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#4,0(<input_0=int64#1)
# asm 2: vmovupd <r0=%ymm3,0(<input_0=%rdi)
vmovupd %ymm3,0(%rdi)

# qhasm: return
add %r11,%rsp
ret
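
# Summary of the routine that ends here: it multiplies two 13-limb
# bitsliced operands and reduces the 25-limb product modulo
# x^13 + x^4 + x^3 + x + 1.  A C-style sketch of the same computation
# (names are illustrative only, not part of this file):
#
#     /* vec256: any 256-bit vector type; r[25] starts zeroed */
#     for (int i = 12; i >= 0; i--) {
#         for (int j = 0; j < 13; j++)
#             r[i + j] ^= a[i] & b[j];     /* the vpand/vpxor rows  */
#         int k = i + 12;                  /* top limb is now final */
#         if (k >= 13) {                   /* fold it, as above     */
#             r[k - 13 + 4] ^= r[k];
#             r[k - 13 + 3] ^= r[k];
#             r[k - 13 + 1] ^= r[k];
#             r[k - 13 + 0] ^= r[k];
#         }
#     }
#     /* r[0..12] is the reduced product stored to input_0 above */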
mktmansour/MKT-KSA-Geolocation-Security
264233
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128f/avx2/transpose_64x256_sp_asm.S
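# transpose_64x256_sp_asm below performs an in-place bit-matrix
# transpose: input_0 points at 64 rows of 256 bits (row r at byte
# offset 32*r), and the four 64-bit lanes of each row are treated as
# four independent 64x64 bit matrices transposed in parallel.  Each
# pass loads the eight rows r, r+8, ..., r+56, delta-swaps bit blocks
# at distances 32, 16 and 8 (mask pairs MASK5_*, MASK4_*, MASK3_*),
# and stores the rows back; the smaller distances are expected to use
# MASK2_*..MASK0_* in the passes that follow.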
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x256_sp_asm CRYPTO_NAMESPACE(transpose_64x256_sp_asm) #define _transpose_64x256_sp_asm _CRYPTO_NAMESPACE(transpose_64x256_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 x0 # qhasm: reg256 x1 # qhasm: reg256 x2 # qhasm: reg256 x3 # qhasm: reg256 x4 # qhasm: reg256 x5 # qhasm: reg256 x6 # qhasm: reg256 x7 # qhasm: reg256 t0 # qhasm: reg256 t1 # qhasm: reg256 v00 # qhasm: reg256 v01 # qhasm: reg256 v10 # qhasm: reg256 v11 # qhasm: reg256 mask0 # qhasm: reg256 mask1 # qhasm: reg256 mask2 # qhasm: reg256 mask3 # qhasm: reg256 mask4 # qhasm: reg256 mask5 # qhasm: enter transpose_64x256_sp_asm .p2align 5 .global _transpose_64x256_sp_asm .global transpose_64x256_sp_asm _transpose_64x256_sp_asm: transpose_64x256_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem256[ MASK5_0 ] # asm 1: vmovapd MASK5_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK5_0(%rip),>mask0=%ymm0 vmovapd MASK5_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK5_1 ] # asm 1: vmovapd MASK5_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK5_1(%rip),>mask1=%ymm1 vmovapd MASK5_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK4_0 ] # asm 1: vmovapd MASK4_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK4_0(%rip),>mask2=%ymm2 vmovapd MASK4_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK4_1 ] # asm 1: vmovapd MASK4_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK4_1(%rip),>mask3=%ymm3 vmovapd MASK4_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK3_0 ] # asm 1: vmovapd MASK3_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK3_0(%rip),>mask4=%ymm4 vmovapd MASK3_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK3_1 ] # asm 1: vmovapd MASK3_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK3_1(%rip),>mask5=%ymm5 vmovapd MASK3_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 256(<input_0=%rdi),>x1=%ymm7 vmovupd 256( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 512 ] 
# asm 1: vmovupd 512(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 512(<input_0=%rdi),>x2=%ymm8 vmovupd 512( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 768 ] # asm 1: vmovupd 768(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 768(<input_0=%rdi),>x3=%ymm9 vmovupd 768( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1024(<input_0=%rdi),>x4=%ymm10 vmovupd 1024( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1280 ] # asm 1: vmovupd 1280(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1280(<input_0=%rdi),>x5=%ymm11 vmovupd 1280( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1536(<input_0=%rdi),>x6=%ymm12 vmovupd 1536( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1792(<input_0=%rdi),>x7=%ymm13 vmovupd 1792( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 
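
# Each step above is a delta swap: for a row pair (lo, hi) at bit
# distance s (s = 32, 16 or 8 here) and the matching mask pair,
#     new_lo = (lo & mask_lo) | (hi << s)
#     new_hi = (lo >> s) | (hi & mask_hi)
# exchanges the high s-bit field of lo with the low s-bit field of hi
# in every 2s-bit lane.  The x4/x5 and x6/x7 pairs below finish the
# distance-8 stage before the eight rows are stored back.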
# qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 256 ] = x1 # asm 1: vmovupd <x1=reg256#14,256(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,256(<input_0=%rdi) vmovupd % ymm13, 256( % rdi) # qhasm: mem256[ input_0 + 512 ] = x2 # asm 1: vmovupd <x2=reg256#15,512(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,512(<input_0=%rdi) vmovupd % ymm14, 512( % rdi) # qhasm: mem256[ input_0 + 768 ] = x3 # asm 1: vmovupd <x3=reg256#11,768(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,768(<input_0=%rdi) vmovupd % ymm10, 768( % rdi) # qhasm: mem256[ input_0 + 1024 ] = x4 # asm 1: vmovupd <x4=reg256#12,1024(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1024(<input_0=%rdi) vmovupd % ymm11, 1024( % rdi) # qhasm: mem256[ input_0 + 1280 ] = x5 # asm 1: vmovupd <x5=reg256#9,1280(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1280(<input_0=%rdi) vmovupd % ymm8, 1280( % rdi) # qhasm: mem256[ input_0 + 1536 ] = x6 # asm 1: vmovupd <x6=reg256#13,1536(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1536(<input_0=%rdi) vmovupd % ymm12, 1536( % rdi) # qhasm: mem256[ input_0 + 1792 ] = x7 # asm 1: vmovupd <x7=reg256#7,1792(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1792(<input_0=%rdi) vmovupd % ymm6, 1792( % rdi) # qhasm: x0 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6 vmovupd 32( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 544 ] # asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8 vmovupd 544( % rdi), % ymm8 # qhasm: x3 = mem256[ 
# qhasm: x0 = mem256[ input_0 + 32 ]
# asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6
vmovupd 32(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 288 ]
# asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7
vmovupd 288(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 544 ]
# asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8
vmovupd 544(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 800 ]
# asm 1: vmovupd 800(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 800(<input_0=%rdi),>x3=%ymm9
vmovupd 800(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 1056 ]
# asm 1: vmovupd 1056(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1056(<input_0=%rdi),>x4=%ymm10
vmovupd 1056(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 1312 ]
# asm 1: vmovupd 1312(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1312(<input_0=%rdi),>x5=%ymm11
vmovupd 1312(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 1568 ]
# asm 1: vmovupd 1568(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1568(<input_0=%rdi),>x6=%ymm12
vmovupd 1568(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 1824 ]
# asm 1: vmovupd 1824(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1824(<input_0=%rdi),>x7=%ymm13
vmovupd 1824(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6,%ymm0,%ymm14

# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32,%ymm10,%ymm15

# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32,%ymm6,%ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10,%ymm1,%ymm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7,%ymm0,%ymm10

# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32,%ymm11,%ymm15

# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32,%ymm7,%ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11,%ymm1,%ymm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10,%ymm15,%ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8,%ymm0,%ymm11

# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32,%ymm12,%ymm15

# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32,%ymm8,%ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12,%ymm1,%ymm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9,%ymm0,%ymm12

# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32,%ymm13,%ymm15

# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32,%ymm9,%ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13,%ymm1,%ymm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9,%ymm13,%ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14,%ymm2,%ymm13

# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16,%ymm11,%ymm15

# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16,%ymm14,%ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11,%ymm3,%ymm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13,%ymm15,%ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10,%ymm2,%ymm14

# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16,%ymm12,%ymm15

# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16,%ymm10,%ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12,%ymm3,%ymm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10,%ymm12,%ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6,%ymm2,%ymm12

# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16,%ymm8,%ymm15

# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16,%ymm6,%ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8,%ymm3,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7,%ymm2,%ymm8

# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16,%ymm9,%ymm15

# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16,%ymm7,%ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9,%ymm3,%ymm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8,%ymm15,%ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7,%ymm9,%ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13,%ymm4,%ymm9

# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8,%ymm14,%ymm15

# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8,%ymm13,%ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14,%ymm5,%ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9,%ymm15,%ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13,%ymm14,%ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11,%ymm4,%ymm14

# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8,%ymm10,%ymm15

# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8,%ymm11,%ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10,%ymm5,%ymm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11,%ymm10,%ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12,%ymm4,%ymm11

# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8,%ymm8,%ymm15

# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8,%ymm12,%ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8,%ymm5,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12,%ymm8,%ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6,%ymm4,%ymm12

# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8,%ymm7,%ymm15

# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8,%ymm6,%ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7,%ymm5,%ymm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6,%ymm7,%ymm6

# qhasm: mem256[ input_0 + 32 ] = x0
# asm 1: vmovupd <x0=reg256#10,32(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,32(<input_0=%rdi)
vmovupd %ymm9,32(%rdi)

# qhasm: mem256[ input_0 + 288 ] = x1
# asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi)
vmovupd %ymm13,288(%rdi)

# qhasm: mem256[ input_0 + 544 ] = x2
# asm 1: vmovupd <x2=reg256#15,544(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,544(<input_0=%rdi)
vmovupd %ymm14,544(%rdi)

# qhasm: mem256[ input_0 + 800 ] = x3
# asm 1: vmovupd <x3=reg256#11,800(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,800(<input_0=%rdi)
vmovupd %ymm10,800(%rdi)

# qhasm: mem256[ input_0 + 1056 ] = x4
# asm 1: vmovupd <x4=reg256#12,1056(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1056(<input_0=%rdi)
vmovupd %ymm11,1056(%rdi)

# qhasm: mem256[ input_0 + 1312 ] = x5
# asm 1: vmovupd <x5=reg256#9,1312(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1312(<input_0=%rdi)
vmovupd %ymm8,1312(%rdi)

# qhasm: mem256[ input_0 + 1568 ] = x6
# asm 1: vmovupd <x6=reg256#13,1568(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1568(<input_0=%rdi)
vmovupd %ymm12,1568(%rdi)

# qhasm: mem256[ input_0 + 1824 ] = x7
# asm 1: vmovupd <x7=reg256#7,1824(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1824(<input_0=%rdi)
vmovupd %ymm6,1824(%rdi)
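# For reference (added comment), a minimal C sketch of the swap done by
# each six-instruction group above. The mask values are an assumption
# matching the 32-bit step; this sketch is an illustration, not part of
# the generated source:
#
#     /* interleave 32-bit halves of 64-bit lanes a and b */
#     uint64_t lo = 0x00000000FFFFFFFFULL, hi = ~lo;
#     uint64_t new_a = (a & lo) | (b << 32);   /* v00 | v10 */
#     uint64_t new_b = (a >> 32) | (b & hi);   /* v01 | v11 */
#
# The shift itself clears the vacated half, so only v00 and v11 need an
# explicit vpand; the generated code mirrors this shape at 32-, 16- and
# 8-bit widths.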
# qhasm: x0 = mem256[ input_0 + 64 ]
# asm 1: vmovupd 64(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 64(<input_0=%rdi),>x0=%ymm6
vmovupd 64(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 320 ]
# asm 1: vmovupd 320(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 320(<input_0=%rdi),>x1=%ymm7
vmovupd 320(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 576 ]
# asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8
vmovupd 576(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 832 ]
# asm 1: vmovupd 832(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 832(<input_0=%rdi),>x3=%ymm9
vmovupd 832(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 1088 ]
# asm 1: vmovupd 1088(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1088(<input_0=%rdi),>x4=%ymm10
vmovupd 1088(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 1344 ]
# asm 1: vmovupd 1344(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1344(<input_0=%rdi),>x5=%ymm11
vmovupd 1344(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 1600 ]
# asm 1: vmovupd 1600(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1600(<input_0=%rdi),>x6=%ymm12
vmovupd 1600(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 1856 ]
# asm 1: vmovupd 1856(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1856(<input_0=%rdi),>x7=%ymm13
vmovupd 1856(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6,%ymm0,%ymm14

# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32,%ymm10,%ymm15

# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32,%ymm6,%ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10,%ymm1,%ymm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7,%ymm0,%ymm10

# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32,%ymm11,%ymm15

# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32,%ymm7,%ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11,%ymm1,%ymm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10,%ymm15,%ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8,%ymm0,%ymm11

# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32,%ymm12,%ymm15

# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32,%ymm8,%ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12,%ymm1,%ymm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9,%ymm0,%ymm12

# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32,%ymm13,%ymm15

# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32,%ymm9,%ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13,%ymm1,%ymm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9,%ymm13,%ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14,%ymm2,%ymm13

# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16,%ymm11,%ymm15

# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16,%ymm14,%ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11,%ymm3,%ymm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13,%ymm15,%ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10,%ymm2,%ymm14

# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16,%ymm12,%ymm15

# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16,%ymm10,%ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12,%ymm3,%ymm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10,%ymm12,%ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6,%ymm2,%ymm12

# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16,%ymm8,%ymm15

# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16,%ymm6,%ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8,%ymm3,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7,%ymm2,%ymm8

# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16,%ymm9,%ymm15

# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16,%ymm7,%ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9,%ymm3,%ymm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8,%ymm15,%ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7,%ymm9,%ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13,%ymm4,%ymm9

# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8,%ymm14,%ymm15

# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8,%ymm13,%ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14,%ymm5,%ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9,%ymm15,%ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13,%ymm14,%ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11,%ymm4,%ymm14

# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8,%ymm10,%ymm15

# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8,%ymm11,%ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10,%ymm5,%ymm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11,%ymm10,%ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12,%ymm4,%ymm11

# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8,%ymm8,%ymm15

# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8,%ymm12,%ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8,%ymm5,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12,%ymm8,%ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6,%ymm4,%ymm12

# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8,%ymm7,%ymm15

# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8,%ymm6,%ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7,%ymm5,%ymm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6,%ymm7,%ymm6

# qhasm: mem256[ input_0 + 64 ] = x0
# asm 1: vmovupd <x0=reg256#10,64(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,64(<input_0=%rdi)
vmovupd %ymm9,64(%rdi)

# qhasm: mem256[ input_0 + 320 ] = x1
# asm 1: vmovupd <x1=reg256#14,320(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,320(<input_0=%rdi)
vmovupd %ymm13,320(%rdi)

# qhasm: mem256[ input_0 + 576 ] = x2
# asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi)
vmovupd %ymm14,576(%rdi)

# qhasm: mem256[ input_0 + 832 ] = x3
# asm 1: vmovupd <x3=reg256#11,832(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,832(<input_0=%rdi)
vmovupd %ymm10,832(%rdi)

# qhasm: mem256[ input_0 + 1088 ] = x4
# asm 1: vmovupd <x4=reg256#12,1088(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1088(<input_0=%rdi)
vmovupd %ymm11,1088(%rdi)

# qhasm: mem256[ input_0 + 1344 ] = x5
# asm 1: vmovupd <x5=reg256#9,1344(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1344(<input_0=%rdi)
vmovupd %ymm8,1344(%rdi)

# qhasm: mem256[ input_0 + 1600 ] = x6
# asm 1: vmovupd <x6=reg256#13,1600(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1600(<input_0=%rdi)
vmovupd %ymm12,1600(%rdi)

# qhasm: mem256[ input_0 + 1856 ] = x7
# asm 1: vmovupd <x7=reg256#7,1856(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1856(<input_0=%rdi)
vmovupd %ymm6,1856(%rdi)
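# Memory access pattern (added comment): each unrolled block above
# handles one 32-byte column of a 2048-byte region, reading and writing
# eight 256-bit rows at a 256-byte stride (base + 0, 256, ..., 1792).
# With rows stored 32 bytes apart, that pairs row j with rows j+8,
# j+16 and j+32, which together with the 32/16/8-bit interleaves is the
# familiar shape of a masked bit-matrix transpose; the precise intent
# depends on the callers, which lie outside this section.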
# qhasm: x0 = mem256[ input_0 + 96 ]
# asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6
vmovupd 96(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 352 ]
# asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7
vmovupd 352(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 608 ]
# asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8
vmovupd 608(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 864 ]
# asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9
vmovupd 864(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 1120 ]
# asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10
vmovupd 1120(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 1376 ]
# asm 1: vmovupd 1376(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1376(<input_0=%rdi),>x5=%ymm11
vmovupd 1376(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 1632 ]
# asm 1: vmovupd 1632(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1632(<input_0=%rdi),>x6=%ymm12
vmovupd 1632(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 1888 ]
# asm 1: vmovupd 1888(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1888(<input_0=%rdi),>x7=%ymm13
vmovupd 1888(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6,%ymm0,%ymm14

# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32,%ymm10,%ymm15

# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32,%ymm6,%ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10,%ymm1,%ymm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7,%ymm0,%ymm10

# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32,%ymm11,%ymm15

# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32,%ymm7,%ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11,%ymm1,%ymm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10,%ymm15,%ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8,%ymm0,%ymm11

# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32,%ymm12,%ymm15

# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32,%ymm8,%ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12,%ymm1,%ymm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9,%ymm0,%ymm12

# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32,%ymm13,%ymm15

# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32,%ymm9,%ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13,%ymm1,%ymm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9,%ymm13,%ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14,%ymm2,%ymm13

# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16,%ymm11,%ymm15

# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16,%ymm14,%ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11,%ymm3,%ymm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13,%ymm15,%ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10,%ymm2,%ymm14

# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16,%ymm12,%ymm15

# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16,%ymm10,%ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12,%ymm3,%ymm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10,%ymm12,%ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6,%ymm2,%ymm12

# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16,%ymm8,%ymm15

# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16,%ymm6,%ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8,%ymm3,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7,%ymm2,%ymm8

# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16,%ymm9,%ymm15

# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16,%ymm7,%ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9,%ymm3,%ymm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8,%ymm15,%ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7,%ymm9,%ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13,%ymm4,%ymm9

# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8,%ymm14,%ymm15

# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8,%ymm13,%ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14,%ymm5,%ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9,%ymm15,%ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13,%ymm14,%ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11,%ymm4,%ymm14

# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8,%ymm10,%ymm15

# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8,%ymm11,%ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10,%ymm5,%ymm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11,%ymm10,%ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12,%ymm4,%ymm11

# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8,%ymm8,%ymm15

# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8,%ymm12,%ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8,%ymm5,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12,%ymm8,%ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6,%ymm4,%ymm12

# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8,%ymm7,%ymm15

# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8,%ymm6,%ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7,%ymm5,%ymm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6,%ymm7,%ymm6

# qhasm: mem256[ input_0 + 96 ] = x0
# asm 1: vmovupd <x0=reg256#10,96(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,96(<input_0=%rdi)
vmovupd %ymm9,96(%rdi)

# qhasm: mem256[ input_0 + 352 ] = x1
# asm 1: vmovupd <x1=reg256#14,352(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,352(<input_0=%rdi)
vmovupd %ymm13,352(%rdi)

# qhasm: mem256[ input_0 + 608 ] = x2
# asm 1: vmovupd <x2=reg256#15,608(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,608(<input_0=%rdi)
vmovupd %ymm14,608(%rdi)

# qhasm: mem256[ input_0 + 864 ] = x3
# asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi)
vmovupd %ymm10,864(%rdi)

# qhasm: mem256[ input_0 + 1120 ] = x4
# asm 1: vmovupd <x4=reg256#12,1120(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1120(<input_0=%rdi)
vmovupd %ymm11,1120(%rdi)

# qhasm: mem256[ input_0 + 1376 ] = x5
# asm 1: vmovupd <x5=reg256#9,1376(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1376(<input_0=%rdi)
vmovupd %ymm8,1376(%rdi)

# qhasm: mem256[ input_0 + 1632 ] = x6
# asm 1: vmovupd <x6=reg256#13,1632(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1632(<input_0=%rdi)
vmovupd %ymm12,1632(%rdi)

# qhasm: mem256[ input_0 + 1888 ] = x7
# asm 1: vmovupd <x7=reg256#7,1888(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1888(<input_0=%rdi)
vmovupd %ymm6,1888(%rdi)
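# Register budget (added comment): in this section ymm0..ymm5 stay
# pinned to mask0..mask5, ymm15 serves as the single scratch register
# for every v10, and the eight active rows cycle through ymm6..ymm14,
# so no stack spills are needed. The asm 2 annotations record the
# register the qhasm compiler chose for each variable.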
# qhasm: x0 = mem256[ input_0 + 128 ]
# asm 1: vmovupd 128(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 128(<input_0=%rdi),>x0=%ymm6
vmovupd 128(%rdi),%ymm6

# qhasm: x1 = mem256[ input_0 + 384 ]
# asm 1: vmovupd 384(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 384(<input_0=%rdi),>x1=%ymm7
vmovupd 384(%rdi),%ymm7

# qhasm: x2 = mem256[ input_0 + 640 ]
# asm 1: vmovupd 640(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 640(<input_0=%rdi),>x2=%ymm8
vmovupd 640(%rdi),%ymm8

# qhasm: x3 = mem256[ input_0 + 896 ]
# asm 1: vmovupd 896(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 896(<input_0=%rdi),>x3=%ymm9
vmovupd 896(%rdi),%ymm9

# qhasm: x4 = mem256[ input_0 + 1152 ]
# asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10
vmovupd 1152(%rdi),%ymm10

# qhasm: x5 = mem256[ input_0 + 1408 ]
# asm 1: vmovupd 1408(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1408(<input_0=%rdi),>x5=%ymm11
vmovupd 1408(%rdi),%ymm11

# qhasm: x6 = mem256[ input_0 + 1664 ]
# asm 1: vmovupd 1664(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1664(<input_0=%rdi),>x6=%ymm12
vmovupd 1664(%rdi),%ymm12

# qhasm: x7 = mem256[ input_0 + 1920 ]
# asm 1: vmovupd 1920(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1920(<input_0=%rdi),>x7=%ymm13
vmovupd 1920(%rdi),%ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6,%ymm0,%ymm14

# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32,%ymm10,%ymm15

# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32,%ymm6,%ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10,%ymm1,%ymm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6,%ymm10,%ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7,%ymm0,%ymm10

# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32,%ymm11,%ymm15

# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32,%ymm7,%ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11,%ymm1,%ymm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10,%ymm15,%ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7,%ymm11,%ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8,%ymm0,%ymm11

# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32,%ymm12,%ymm15

# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32,%ymm8,%ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12,%ymm1,%ymm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8,%ymm12,%ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9,%ymm0,%ymm12

# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32,%ymm13,%ymm15

# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32,%ymm9,%ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13,%ymm1,%ymm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9,%ymm13,%ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14,%ymm2,%ymm13

# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16,%ymm11,%ymm15

# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16,%ymm14,%ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11,%ymm3,%ymm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13,%ymm15,%ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14,%ymm11,%ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10,%ymm2,%ymm14

# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16,%ymm12,%ymm15

# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16,%ymm10,%ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12,%ymm3,%ymm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10,%ymm12,%ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6,%ymm2,%ymm12

# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16,%ymm8,%ymm15

# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16,%ymm6,%ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8,%ymm3,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6,%ymm8,%ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7,%ymm2,%ymm8

# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16,%ymm9,%ymm15

# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16,%ymm7,%ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9,%ymm3,%ymm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8,%ymm15,%ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7,%ymm9,%ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13,%ymm4,%ymm9

# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8,%ymm14,%ymm15

# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8,%ymm13,%ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14,%ymm5,%ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9,%ymm15,%ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13,%ymm14,%ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11,%ymm4,%ymm14

# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8,%ymm10,%ymm15

# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8,%ymm11,%ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10,%ymm5,%ymm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14,%ymm15,%ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11,%ymm10,%ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12,%ymm4,%ymm11

# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8,%ymm8,%ymm15

# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8,%ymm12,%ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8,%ymm5,%ymm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11,%ymm15,%ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12,%ymm8,%ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6,%ymm4,%ymm12

# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8,%ymm7,%ymm15

# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8,%ymm6,%ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7,%ymm5,%ymm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12,%ymm15,%ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6,%ymm7,%ymm6

# qhasm: mem256[ input_0 + 128 ] = x0
# asm 1: vmovupd <x0=reg256#10,128(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,128(<input_0=%rdi)
vmovupd %ymm9,128(%rdi)

# qhasm: mem256[ input_0 + 384 ] = x1
# asm 1: vmovupd <x1=reg256#14,384(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,384(<input_0=%rdi)
vmovupd %ymm13,384(%rdi)

# qhasm: mem256[ input_0 + 640 ] = x2
# asm 1: vmovupd <x2=reg256#15,640(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,640(<input_0=%rdi)
vmovupd %ymm14,640(%rdi)

# qhasm: mem256[ input_0 + 896 ] = x3
# asm 1: vmovupd <x3=reg256#11,896(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,896(<input_0=%rdi)
vmovupd %ymm10,896(%rdi)

# qhasm: mem256[ input_0 + 1152 ] = x4
# asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi)
vmovupd %ymm11,1152(%rdi)

# qhasm: mem256[ input_0 + 1408 ] = x5
# asm 1: vmovupd <x5=reg256#9,1408(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1408(<input_0=%rdi)
vmovupd %ymm8,1408(%rdi)

# qhasm: mem256[ input_0 + 1664 ] = x6
# asm 1: vmovupd <x6=reg256#13,1664(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1664(<input_0=%rdi)
vmovupd %ymm12,1664(%rdi)

# qhasm: mem256[ input_0 + 1920 ] = x7
# asm 1: vmovupd <x7=reg256#7,1920(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1920(<input_0=%rdi)
vmovupd %ymm6,1920(%rdi)
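# Granularity note (added comment): only byte-level and coarser swaps
# (32, then 16, then 8 bits) appear in these unrolled blocks. If the
# surrounding routine performs a full bit-level transpose, the finer
# 4/2/1-bit steps would have to live elsewhere in the file; nothing in
# this section performs them.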
1696(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1696(<input_0=%rdi),>x6=%ymm12 vmovupd 1696( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1952(<input_0=%rdi),>x7=%ymm13 vmovupd 1952( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq 
vpsrlq $32,%ymm9,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13
# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13
# qhasm: 8x v10 = x2 << 16
vpslld $16,%ymm11,%ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
vpsrld $16,%ymm14,%ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14
# qhasm: 8x v10 = x3 << 16
vpslld $16,%ymm12,%ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
vpsrld $16,%ymm10,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12
# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12
# qhasm: 8x v10 = x6 << 16
vpslld $16,%ymm8,%ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
vpsrld $16,%ymm6,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8
# qhasm: 8x v10 = x7 << 16
vpslld $16,%ymm9,%ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
vpsrld $16,%ymm7,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9
# qhasm: 16x v10 = x1 << 8
vpsllw $8,%ymm14,%ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
vpsrlw $8,%ymm13,%ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14
# qhasm: 16x v10 = x3 << 8
vpsllw $8,%ymm10,%ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
vpsrlw $8,%ymm11,%ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10
# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11
# qhasm: 16x v10 = x5 << 8
vpsllw $8,%ymm8,%ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
vpsrlw $8,%ymm12,%ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8
# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12
# qhasm: 16x v10 = x7 << 8
vpsllw $8,%ymm7,%ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
vpsrlw $8,%ymm6,%ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7
# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6
# qhasm: mem256[ input_0 + 160 ] = x0
vmovupd %ymm9,160(%rdi)
# qhasm: mem256[ input_0 + 416 ] = x1
vmovupd %ymm13,416(%rdi)
# qhasm: mem256[ input_0 + 672 ] = x2
vmovupd %ymm14,672(%rdi)
# qhasm: mem256[ input_0 + 928 ] = x3
vmovupd %ymm10,928(%rdi)
# qhasm: mem256[ input_0 + 1184 ] = x4
vmovupd %ymm11,1184(%rdi)
# qhasm: mem256[ input_0 + 1440 ] = x5
vmovupd %ymm8,1440(%rdi)
# qhasm: mem256[ input_0 + 1696 ] = x6
vmovupd %ymm12,1696(%rdi)
# qhasm: mem256[ input_0 + 1952 ] = x7
vmovupd %ymm6,1952(%rdi)
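# The three rounds above are the masked-swap ("butterfly") pattern of a
# 64x64 bit-matrix transpose, carried out on four independent 64-bit
# lanes per ymm register.  The eight rows just stored (offsets 160,
# 416, ..., 1952, i.e. every eighth 32-byte row of a 64-row matrix)
# exchanged 32-, 16- and 8-bit blocks between row pairs 32, 16 and 8
# rows apart, one vpand/shift/vpor triple per exchange.  The same
# rounds are repeated below for the next interleaved row slice
# (offsets 192, 448, ..., 1984).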
# qhasm: x0 = mem256[ input_0 + 192 ]
vmovupd 192(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 448 ]
vmovupd 448(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 704 ]
vmovupd 704(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 960 ]
vmovupd 960(%rdi),%ymm9
# qhasm: x4 = mem256[ input_0 + 1216 ]
vmovupd 1216(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 1472 ]
vmovupd 1472(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 1728 ]
vmovupd 1728(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 1984 ]
vmovupd 1984(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14
# qhasm: 4x v10 = x4 << 32
vpsllq $32,%ymm10,%ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
vpsrlq $32,%ymm6,%ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10
# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10
# qhasm: 4x v10 = x5 << 32
vpsllq $32,%ymm11,%ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
vpsrlq $32,%ymm7,%ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11
# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: 4x v10 = x6 << 32
vpsllq $32,%ymm12,%ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
vpsrlq $32,%ymm8,%ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12
# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12
# qhasm: 4x v10 = x7 << 32
vpsllq $32,%ymm13,%ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
vpsrlq $32,%ymm9,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13
# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13
# qhasm: 8x v10 = x2 << 16
vpslld $16,%ymm11,%ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
vpsrld $16,%ymm14,%ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14
# qhasm: 8x v10 = x3 << 16
vpslld $16,%ymm12,%ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
vpsrld $16,%ymm10,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12
# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12
# qhasm: 8x v10 = x6 << 16
vpslld $16,%ymm8,%ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
vpsrld $16,%ymm6,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8
# qhasm: 8x v10 = x7 << 16
vpslld $16,%ymm9,%ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
vpsrld $16,%ymm7,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9
# qhasm: 16x v10 = x1 << 8
vpsllw $8,%ymm14,%ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
vpsrlw $8,%ymm13,%ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14
# qhasm: 16x v10 = x3 << 8
vpsllw $8,%ymm10,%ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
vpsrlw $8,%ymm11,%ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10
# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11
# qhasm: 16x v10 = x5 << 8
vpsllw $8,%ymm8,%ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
vpsrlw $8,%ymm12,%ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8
# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12
# qhasm: 16x v10 = x7 << 8
vpsllw $8,%ymm7,%ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
vpsrlw $8,%ymm6,%ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7
# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6
# qhasm: mem256[ input_0 + 192 ] = x0
vmovupd %ymm9,192(%rdi)
# qhasm: mem256[ input_0 + 448 ] = x1
vmovupd %ymm13,448(%rdi)
# qhasm: mem256[ input_0 + 704 ] = x2
vmovupd %ymm14,704(%rdi)
# qhasm: mem256[ input_0 + 960 ] = x3
vmovupd %ymm10,960(%rdi)
# qhasm: mem256[ input_0 + 1216 ] = x4
vmovupd %ymm11,1216(%rdi)
# qhasm: mem256[ input_0 + 1472 ] = x5
vmovupd %ymm8,1472(%rdi)
# qhasm: mem256[ input_0 + 1728 ] = x6
vmovupd %ymm12,1728(%rdi)
# qhasm: mem256[ input_0 + 1984 ] = x7
vmovupd %ymm6,1984(%rdi)
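# The same three rounds now handle the last interleaved row slice
# (offsets 224, 480, ..., 2016).  This final block also reuses
# ymm0..ymm5 as scratch, clobbering the mask registers, which is why
# all six masks are reloaded immediately after it.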
# qhasm: x0 = mem256[ input_0 + 224 ]
vmovupd 224(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 480 ]
vmovupd 480(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 736 ]
vmovupd 736(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 992 ]
vmovupd 992(%rdi),%ymm9
# qhasm: x4 = mem256[ input_0 + 1248 ]
vmovupd 1248(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 1504 ]
vmovupd 1504(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 1760 ]
vmovupd 1760(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 2016 ]
vmovupd 2016(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14
# qhasm: 4x v10 = x4 << 32
vpsllq $32,%ymm10,%ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
vpsrlq $32,%ymm6,%ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10
# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10
# qhasm: 4x v10 = x5 << 32
vpsllq $32,%ymm11,%ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
vpsrlq $32,%ymm7,%ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11
# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: 4x v10 = x6 << 32
vpsllq $32,%ymm12,%ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
vpsrlq $32,%ymm8,%ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12
# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm0
# qhasm: 4x v10 = x7 << 32
vpsllq $32,%ymm13,%ymm12
# qhasm: 4x v01 = x3 unsigned>> 32
vpsrlq $32,%ymm9,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm1
# qhasm: x3 = v00 | v10
vpor %ymm0,%ymm12,%ymm0
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm1,%ymm1
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm9
# qhasm: 8x v10 = x2 << 16
vpslld $16,%ymm11,%ymm12
# qhasm: 8x v01 = x0 unsigned>> 16
vpsrld $16,%ymm14,%ymm13
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm12,%ymm9
# qhasm: x2 = v01 | v11
vpor %ymm13,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm12
# qhasm: 8x v10 = x3 << 16
vpslld $16,%ymm0,%ymm13
# qhasm: 8x v01 = x1 unsigned>> 16
vpsrld $16,%ymm10,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm0,%ymm3,%ymm0
# qhasm: x1 = v00 | v10
vpor %ymm12,%ymm13,%ymm12
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm0,%ymm0
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm10
# qhasm: 8x v10 = x6 << 16
vpslld $16,%ymm8,%ymm13
# qhasm: 8x v01 = x4 unsigned>> 16
vpsrld $16,%ymm6,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: x4 = v00 | v10
vpor %ymm10,%ymm13,%ymm10
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm2
# qhasm: 8x v10 = x7 << 16
vpslld $16,%ymm1,%ymm8
# qhasm: 8x v01 = x5 unsigned>> 16
vpsrld $16,%ymm7,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm1,%ymm3,%ymm1
# qhasm: x5 = v00 | v10
vpor %ymm2,%ymm8,%ymm2
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm1,%ymm1
# qhasm: v00 = x0 & mask4
vpand %ymm9,%ymm4,%ymm3
# qhasm: 16x v10 = x1 << 8
vpsllw $8,%ymm12,%ymm7
# qhasm: 16x v01 = x0 unsigned>> 8
vpsrlw $8,%ymm9,%ymm8
# qhasm: v11 = x1 & mask5
vpand %ymm12,%ymm5,%ymm9
# qhasm: x0 = v00 | v10
vpor %ymm3,%ymm7,%ymm3
# qhasm: x1 = v01 | v11
vpor %ymm8,%ymm9,%ymm7
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm8
# qhasm: 16x v10 = x3 << 8
vpsllw $8,%ymm0,%ymm9
# qhasm: 16x v01 = x2 unsigned>> 8
vpsrlw $8,%ymm11,%ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm0,%ymm5,%ymm0
# qhasm: x2 = v00 | v10
vpor %ymm8,%ymm9,%ymm8
# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm0,%ymm0
# qhasm: v00 = x4 & mask4
vpand %ymm10,%ymm4,%ymm9
# qhasm: 16x v10 = x5 << 8
vpsllw $8,%ymm2,%ymm11
# qhasm: 16x v01 = x4 unsigned>> 8
vpsrlw $8,%ymm10,%ymm10
# qhasm: v11 = x5 & mask5
vpand %ymm2,%ymm5,%ymm2
# qhasm: x4 = v00 | v10
vpor %ymm9,%ymm11,%ymm9
# qhasm: x5 = v01 | v11
vpor %ymm10,%ymm2,%ymm2
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm4
# qhasm: 16x v10 = x7 << 8
vpsllw $8,%ymm1,%ymm10
# qhasm: 16x v01 = x6 unsigned>> 8
vpsrlw $8,%ymm6,%ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm1,%ymm5,%ymm1
# qhasm: x6 = v00 | v10
vpor %ymm4,%ymm10,%ymm4
# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm1,%ymm1
# qhasm: mem256[ input_0 + 224 ] = x0
vmovupd %ymm3,224(%rdi)
# qhasm: mem256[ input_0 + 480 ] = x1
vmovupd %ymm7,480(%rdi)
# qhasm: mem256[ input_0 + 736 ] = x2
vmovupd %ymm8,736(%rdi)
# qhasm: mem256[ input_0 + 992 ] = x3
vmovupd %ymm0,992(%rdi)
# qhasm: mem256[ input_0 + 1248 ] = x4
vmovupd %ymm9,1248(%rdi)
# qhasm: mem256[ input_0 + 1504 ] = x5
vmovupd %ymm2,1504(%rdi)
# qhasm: mem256[ input_0 + 1760 ] = x6
vmovupd %ymm4,1760(%rdi)
# qhasm: mem256[ input_0 + 2016 ] = x7
vmovupd %ymm1,2016(%rdi)
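# Second half of the transpose: the mask registers are reloaded (the
# block above clobbered them), now pairing MASK2_* with 4-bit swaps,
# MASK1_* with 2-bit swaps and MASK0_* with 1-bit swaps.  Each group of
# eight consecutive rows exchanges 4-, 2- and 1-bit blocks between rows
# 4, 2 and 1 apart.  Every exchange below is the standard masked block
# swap; as an illustrative C-style sketch for one 64-bit lane (variable
# names are not from the qhasm source), with M the mask selecting the
# low blocks and w the block width:
#
#   a2 = (a & M) | ((b & M) << w);
#   b2 = ((a & ~M) >> w) | (b & ~M);
#
# Four such lanes are processed per ymm register.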
# qhasm: mask0 aligned= mem256[ MASK2_0 ]
vmovapd MASK2_0(%rip),%ymm0
# qhasm: mask1 aligned= mem256[ MASK2_1 ]
vmovapd MASK2_1(%rip),%ymm1
# qhasm: mask2 aligned= mem256[ MASK1_0 ]
vmovapd MASK1_0(%rip),%ymm2
# qhasm: mask3 aligned= mem256[ MASK1_1 ]
vmovapd MASK1_1(%rip),%ymm3
# qhasm: mask4 aligned= mem256[ MASK0_0 ]
vmovapd MASK0_0(%rip),%ymm4
# qhasm: mask5 aligned= mem256[ MASK0_1 ]
vmovapd MASK0_1(%rip),%ymm5
# qhasm: x0 = mem256[ input_0 + 0 ]
vmovupd 0(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 32 ]
vmovupd 32(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 64 ]
vmovupd 64(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 96 ]
vmovupd 96(%rdi),%ymm9
# qhasm: x4 = mem256[ input_0 + 128 ]
vmovupd 128(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 160 ]
vmovupd 160(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 192 ]
vmovupd 192(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 224 ]
vmovupd 224(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14
# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6
# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10
# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7
# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8
# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12
# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9
# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9
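# Rows 0..7 have now exchanged 4-bit blocks between rows 4 apart; the
# 2-bit and 1-bit rounds below (rows 2 apart, then adjacent rows)
# complete the butterfly network for this group of eight rows.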
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13
# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14
# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14
# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10
# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12
# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6
# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8
# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7
# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9
# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14
# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm11,%ymm11
# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11
# qhasm: v10 = x5 & mask4
vpand %ymm8,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x4 & mask5
vpand %ymm12,%ymm5,%ymm12
# qhasm: v11 = x5 & mask5
vpand %ymm8,%ymm5,%ymm8
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm12,%ymm12
# qhasm: x4 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x5 = v01 | v11
vpor %ymm12,%ymm8,%ymm8
# qhasm: v00 = x6 & mask4
vpand %ymm6,%ymm4,%ymm12
# qhasm: v10 = x7 & mask4
vpand %ymm7,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x6 & mask5
vpand %ymm6,%ymm5,%ymm6
# qhasm: v11 = x7 & mask5
vpand %ymm7,%ymm5,%ymm7
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm6,%ymm6
# qhasm: x6 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm6,%ymm7,%ymm6
# qhasm: mem256[ input_0 + 0 ] = x0
vmovupd %ymm9,0(%rdi)
# qhasm: mem256[ input_0 + 32 ] = x1
vmovupd %ymm13,32(%rdi)
# qhasm: mem256[ input_0 + 64 ] = x2
vmovupd %ymm14,64(%rdi)
# qhasm: mem256[ input_0 + 96 ] = x3
vmovupd %ymm10,96(%rdi)
# qhasm: mem256[ input_0 + 128 ] = x4
vmovupd %ymm11,128(%rdi)
# qhasm: mem256[ input_0 + 160 ] = x5
vmovupd %ymm8,160(%rdi)
# qhasm: mem256[ input_0 + 192 ] = x6
vmovupd %ymm12,192(%rdi)
# qhasm: mem256[ input_0 + 224 ] = x7
vmovupd %ymm6,224(%rdi)
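# Rows 0..7 are written back; the identical 4/2/1-bit rounds are now
# applied to the next group of eight consecutive rows (offsets 256,
# 288, ..., 480).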
# qhasm: x0 = mem256[ input_0 + 256 ]
vmovupd 256(%rdi),%ymm6
# qhasm: x1 = mem256[ input_0 + 288 ]
vmovupd 288(%rdi),%ymm7
# qhasm: x2 = mem256[ input_0 + 320 ]
vmovupd 320(%rdi),%ymm8
# qhasm: x3 = mem256[ input_0 + 352 ]
vmovupd 352(%rdi),%ymm9
# qhasm: x4 = mem256[ input_0 + 384 ]
vmovupd 384(%rdi),%ymm10
# qhasm: x5 = mem256[ input_0 + 416 ]
vmovupd 416(%rdi),%ymm11
# qhasm: x6 = mem256[ input_0 + 448 ]
vmovupd 448(%rdi),%ymm12
# qhasm: x7 = mem256[ input_0 + 480 ]
vmovupd 480(%rdi),%ymm13
# qhasm: v00 = x0 & mask0
vpand %ymm6,%ymm0,%ymm14
# qhasm: v10 = x4 & mask0
vpand %ymm10,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x0 & mask1
vpand %ymm6,%ymm1,%ymm6
# qhasm: v11 = x4 & mask1
vpand %ymm10,%ymm1,%ymm10
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm6,%ymm6
# qhasm: x0 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x4 = v01 | v11
vpor %ymm6,%ymm10,%ymm6
# qhasm: v00 = x1 & mask0
vpand %ymm7,%ymm0,%ymm10
# qhasm: v10 = x5 & mask0
vpand %ymm11,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x1 & mask1
vpand %ymm7,%ymm1,%ymm7
# qhasm: v11 = x5 & mask1
vpand %ymm11,%ymm1,%ymm11
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm7,%ymm7
# qhasm: x1 = v00 | v10
vpor %ymm10,%ymm15,%ymm10
# qhasm: x5 = v01 | v11
vpor %ymm7,%ymm11,%ymm7
# qhasm: v00 = x2 & mask0
vpand %ymm8,%ymm0,%ymm11
# qhasm: v10 = x6 & mask0
vpand %ymm12,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x2 & mask1
vpand %ymm8,%ymm1,%ymm8
# qhasm: v11 = x6 & mask1
vpand %ymm12,%ymm1,%ymm12
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm8,%ymm8
# qhasm: x2 = v00 | v10
vpor %ymm11,%ymm15,%ymm11
# qhasm: x6 = v01 | v11
vpor %ymm8,%ymm12,%ymm8
# qhasm: v00 = x3 & mask0
vpand %ymm9,%ymm0,%ymm12
# qhasm: v10 = x7 & mask0
vpand %ymm13,%ymm0,%ymm15
# qhasm: 4x v10 <<= 4
vpsllq $4,%ymm15,%ymm15
# qhasm: v01 = x3 & mask1
vpand %ymm9,%ymm1,%ymm9
# qhasm: v11 = x7 & mask1
vpand %ymm13,%ymm1,%ymm13
# qhasm: 4x v01 unsigned>>= 4
vpsrlq $4,%ymm9,%ymm9
# qhasm: x3 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x7 = v01 | v11
vpor %ymm9,%ymm13,%ymm9
# qhasm: v00 = x0 & mask2
vpand %ymm14,%ymm2,%ymm13
# qhasm: v10 = x2 & mask2
vpand %ymm11,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x0 & mask3
vpand %ymm14,%ymm3,%ymm14
# qhasm: v11 = x2 & mask3
vpand %ymm11,%ymm3,%ymm11
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm14,%ymm14
# qhasm: x0 = v00 | v10
vpor %ymm13,%ymm15,%ymm13
# qhasm: x2 = v01 | v11
vpor %ymm14,%ymm11,%ymm11
# qhasm: v00 = x1 & mask2
vpand %ymm10,%ymm2,%ymm14
# qhasm: v10 = x3 & mask2
vpand %ymm12,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x1 & mask3
vpand %ymm10,%ymm3,%ymm10
# qhasm: v11 = x3 & mask3
vpand %ymm12,%ymm3,%ymm12
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm10,%ymm10
# qhasm: x1 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm10,%ymm12,%ymm10
# qhasm: v00 = x4 & mask2
vpand %ymm6,%ymm2,%ymm12
# qhasm: v10 = x6 & mask2
vpand %ymm8,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x4 & mask3
vpand %ymm6,%ymm3,%ymm6
# qhasm: v11 = x6 & mask3
vpand %ymm8,%ymm3,%ymm8
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm6,%ymm6
# qhasm: x4 = v00 | v10
vpor %ymm12,%ymm15,%ymm12
# qhasm: x6 = v01 | v11
vpor %ymm6,%ymm8,%ymm6
# qhasm: v00 = x5 & mask2
vpand %ymm7,%ymm2,%ymm8
# qhasm: v10 = x7 & mask2
vpand %ymm9,%ymm2,%ymm15
# qhasm: 4x v10 <<= 2
vpsllq $2,%ymm15,%ymm15
# qhasm: v01 = x5 & mask3
vpand %ymm7,%ymm3,%ymm7
# qhasm: v11 = x7 & mask3
vpand %ymm9,%ymm3,%ymm9
# qhasm: 4x v01 unsigned>>= 2
vpsrlq $2,%ymm7,%ymm7
# qhasm: x5 = v00 | v10
vpor %ymm8,%ymm15,%ymm8
# qhasm: x7 = v01 | v11
vpor %ymm7,%ymm9,%ymm7
# qhasm: v00 = x0 & mask4
vpand %ymm13,%ymm4,%ymm9
# qhasm: v10 = x1 & mask4
vpand %ymm14,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x0 & mask5
vpand %ymm13,%ymm5,%ymm13
# qhasm: v11 = x1 & mask5
vpand %ymm14,%ymm5,%ymm14
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm13,%ymm13
# qhasm: x0 = v00 | v10
vpor %ymm9,%ymm15,%ymm9
# qhasm: x1 = v01 | v11
vpor %ymm13,%ymm14,%ymm13
# qhasm: v00 = x2 & mask4
vpand %ymm11,%ymm4,%ymm14
# qhasm: v10 = x3 & mask4
vpand %ymm10,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x2 & mask5
vpand %ymm11,%ymm5,%ymm11
# qhasm: v11 = x3 & mask5
vpand %ymm10,%ymm5,%ymm10
# qhasm: 4x v01 unsigned>>= 1
vpsrlq $1,%ymm11,%ymm11
# qhasm: x2 = v00 | v10
vpor %ymm14,%ymm15,%ymm14
# qhasm: x3 = v01 | v11
vpor %ymm11,%ymm10,%ymm10
# qhasm: v00 = x4 & mask4
vpand %ymm12,%ymm4,%ymm11
# qhasm: v10 = x5 & mask4
vpand %ymm8,%ymm4,%ymm15
# qhasm: 4x v10 <<= 1
vpsllq $1,%ymm15,%ymm15
# qhasm: v01 = x4 & mask5
# asm 1:
vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 256 ] = x0 # asm 1: vmovupd <x0=reg256#10,256(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,256(<input_0=%rdi) vmovupd % ymm9, 256( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 320 ] = x2 # asm 1: vmovupd <x2=reg256#15,320(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,320(<input_0=%rdi) vmovupd % ymm14, 320( % rdi) # qhasm: mem256[ input_0 + 352 ] = x3 # asm 1: vmovupd <x3=reg256#11,352(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,352(<input_0=%rdi) vmovupd % ymm10, 352( % rdi) # qhasm: mem256[ input_0 + 384 ] = x4 # asm 1: vmovupd <x4=reg256#12,384(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,384(<input_0=%rdi) vmovupd % ymm11, 384( % rdi) # qhasm: mem256[ input_0 + 416 ] = x5 # asm 1: vmovupd <x5=reg256#9,416(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,416(<input_0=%rdi) vmovupd % ymm8, 416( % rdi) # qhasm: mem256[ input_0 + 448 ] = x6 # asm 1: vmovupd <x6=reg256#13,448(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,448(<input_0=%rdi) vmovupd % ymm12, 448( % rdi) # qhasm: mem256[ input_0 + 480 ] = x7 # asm 1: vmovupd <x7=reg256#7,480(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,480(<input_0=%rdi) vmovupd % ymm6, 480( % rdi) # qhasm: x0 = mem256[ input_0 + 512 ] # asm 1: vmovupd 512(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 512(<input_0=%rdi),>x0=%ymm6 vmovupd 512( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 544 ] # asm 
1: vmovupd 544(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 544(<input_0=%rdi),>x1=%ymm7 vmovupd 544( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 608(<input_0=%rdi),>x3=%ymm9 vmovupd 608( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 640 ] # asm 1: vmovupd 640(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 640(<input_0=%rdi),>x4=%ymm10 vmovupd 640( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 672 ] # asm 1: vmovupd 672(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 672(<input_0=%rdi),>x5=%ymm11 vmovupd 672( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 704 ] # asm 1: vmovupd 704(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 704(<input_0=%rdi),>x6=%ymm12 vmovupd 704( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 736 ] # asm 1: vmovupd 736(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 736(<input_0=%rdi),>x7=%ymm13 vmovupd 736( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor 
<v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # 
asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % 
ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 
1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 512 ] = x0 # asm 1: vmovupd <x0=reg256#10,512(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,512(<input_0=%rdi) vmovupd % ymm9, 512( % rdi) # qhasm: mem256[ input_0 + 544 ] = x1 # asm 1: vmovupd <x1=reg256#14,544(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,544(<input_0=%rdi) vmovupd % ymm13, 544( % rdi) # qhasm: mem256[ input_0 + 576 ] = x2 # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi) vmovupd % ymm14, 576( % rdi) # qhasm: mem256[ input_0 + 608 ] = x3 # asm 1: vmovupd <x3=reg256#11,608(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,608(<input_0=%rdi) vmovupd % ymm10, 608( % rdi) # qhasm: mem256[ input_0 + 640 ] = x4 # asm 1: vmovupd <x4=reg256#12,640(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,640(<input_0=%rdi) vmovupd % ymm11, 640( % rdi) # qhasm: mem256[ input_0 + 672 ] = x5 # asm 1: vmovupd <x5=reg256#9,672(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,672(<input_0=%rdi) vmovupd % ymm8, 672( % rdi) # qhasm: mem256[ input_0 + 704 ] = x6 # asm 1: vmovupd <x6=reg256#13,704(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,704(<input_0=%rdi) vmovupd % ymm12, 704( % rdi) # qhasm: mem256[ input_0 + 736 ] = x7 # asm 1: vmovupd <x7=reg256#7,736(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,736(<input_0=%rdi) vmovupd % ymm6, 736( % rdi) # qhasm: x0 = mem256[ input_0 + 768 ] # asm 1: 
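
# The mask/shift rounds in each block above and below appear to form a
# radix-2 bit-interleaving network, repeated for every 256-byte chunk of the
# buffer: mask0/mask1 with 4-bit shifts exchange 4-bit groups between rows
# four apart, mask2/mask3 with 2-bit shifts exchange 2-bit groups between
# rows two apart, and mask4/mask5 with 1-bit shifts exchange single bits
# between adjacent rows -- a bit-level transpose within each 64-bit lane.
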
vmovupd 768(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 768(<input_0=%rdi),>x0=%ymm6 vmovupd 768( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 800 ] # asm 1: vmovupd 800(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 800(<input_0=%rdi),>x1=%ymm7 vmovupd 800( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 832 ] # asm 1: vmovupd 832(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 832(<input_0=%rdi),>x2=%ymm8 vmovupd 832( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 864 ] # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9 vmovupd 864( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 896 ] # asm 1: vmovupd 896(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 896(<input_0=%rdi),>x4=%ymm10 vmovupd 896( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 928 ] # asm 1: vmovupd 928(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 928(<input_0=%rdi),>x5=%ymm11 vmovupd 928( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 960 ] # asm 1: vmovupd 960(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 960(<input_0=%rdi),>x6=%ymm12 vmovupd 960( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 992 ] # asm 1: vmovupd 992(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 992(<input_0=%rdi),>x7=%ymm13 vmovupd 992( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor 
<v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # 
asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % 
ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & 
mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 768 ] = x0 # asm 1: vmovupd <x0=reg256#10,768(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,768(<input_0=%rdi) vmovupd % ymm9, 768( % rdi) # qhasm: mem256[ input_0 + 800 ] = x1 # asm 1: vmovupd <x1=reg256#14,800(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,800(<input_0=%rdi) vmovupd % ymm13, 800( % rdi) # qhasm: mem256[ input_0 + 832 ] = x2 # asm 1: vmovupd <x2=reg256#15,832(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,832(<input_0=%rdi) vmovupd % ymm14, 832( % rdi) # qhasm: mem256[ input_0 + 864 ] = x3 # asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi) vmovupd % ymm10, 864( % rdi) # qhasm: mem256[ input_0 + 896 ] = x4 # asm 1: vmovupd <x4=reg256#12,896(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,896(<input_0=%rdi) vmovupd % ymm11, 896( % rdi) # qhasm: mem256[ input_0 + 928 ] = x5 # asm 1: vmovupd <x5=reg256#9,928(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,928(<input_0=%rdi) vmovupd % ymm8, 928( % rdi) # qhasm: mem256[ input_0 + 960 ] = x6 # asm 1: vmovupd <x6=reg256#13,960(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,960(<input_0=%rdi) vmovupd % ymm12, 960( % rdi) # qhasm: mem256[ input_0 + 992 ] = x7 # asm 1: 
vmovupd <x7=reg256#7,992(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,992(<input_0=%rdi) vmovupd % ymm6, 992( % rdi) # qhasm: x0 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1024(<input_0=%rdi),>x0=%ymm6 vmovupd 1024( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1056 ] # asm 1: vmovupd 1056(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1056(<input_0=%rdi),>x1=%ymm7 vmovupd 1056( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1088 ] # asm 1: vmovupd 1088(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1088(<input_0=%rdi),>x2=%ymm8 vmovupd 1088( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1120 ] # asm 1: vmovupd 1120(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1120(<input_0=%rdi),>x3=%ymm9 vmovupd 1120( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1152 ] # asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10 vmovupd 1152( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1184 ] # asm 1: vmovupd 1184(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1184(<input_0=%rdi),>x5=%ymm11 vmovupd 1184( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1216 ] # asm 1: vmovupd 1216(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1216(<input_0=%rdi),>x6=%ymm12 vmovupd 1216( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1248 ] # asm 1: vmovupd 1248(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1248(<input_0=%rdi),>x7=%ymm13 vmovupd 1248( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 
4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand 
<x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand 
<x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor 
vpor %ymm11, %ymm10, %ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6

# qhasm: mem256[ input_0 + 1024 ] = x0
# asm 1: vmovupd <x0=reg256#10,1024(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,1024(<input_0=%rdi)
vmovupd %ymm9, 1024(%rdi)

# qhasm: mem256[ input_0 + 1056 ] = x1
# asm 1: vmovupd <x1=reg256#14,1056(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,1056(<input_0=%rdi)
vmovupd %ymm13, 1056(%rdi)

# qhasm: mem256[ input_0 + 1088 ] = x2
# asm 1: vmovupd <x2=reg256#15,1088(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,1088(<input_0=%rdi)
vmovupd %ymm14, 1088(%rdi)

# qhasm: mem256[ input_0 + 1120 ] = x3
# asm 1: vmovupd <x3=reg256#11,1120(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,1120(<input_0=%rdi)
vmovupd %ymm10, 1120(%rdi)

# qhasm: mem256[ input_0 + 1152 ] = x4
# asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi)
vmovupd %ymm11, 1152(%rdi)

# qhasm: mem256[ input_0 + 1184 ] = x5
# asm 1: vmovupd <x5=reg256#9,1184(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1184(<input_0=%rdi)
vmovupd %ymm8, 1184(%rdi)

# qhasm: mem256[ input_0 + 1216 ] = x6
# asm 1: vmovupd <x6=reg256#13,1216(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1216(<input_0=%rdi)
vmovupd %ymm12, 1216(%rdi)

# qhasm: mem256[ input_0 + 1248 ] = x7
# asm 1: vmovupd <x7=reg256#7,1248(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1248(<input_0=%rdi)
vmovupd %ymm6, 1248(%rdi)

# qhasm: x0 = mem256[ input_0 + 1280 ]
# asm 1: vmovupd 1280(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 1280(<input_0=%rdi),>x0=%ymm6
vmovupd 1280(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 1312 ]
# asm 1: vmovupd 1312(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 1312(<input_0=%rdi),>x1=%ymm7
vmovupd 1312(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 1344 ]
# asm 1: vmovupd 1344(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 1344(<input_0=%rdi),>x2=%ymm8
vmovupd 1344(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 1376 ]
# asm 1: vmovupd 1376(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 1376(<input_0=%rdi),>x3=%ymm9
vmovupd 1376(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 1408 ]
# asm 1: vmovupd 1408(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1408(<input_0=%rdi),>x4=%ymm10
vmovupd 1408(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 1440 ]
# asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11
vmovupd 1440(%rdi), %ymm11

# qhasm: x6 = mem256[ input_0 + 1472 ]
# asm 1: vmovupd 1472(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1472(<input_0=%rdi),>x6=%ymm12
vmovupd 1472(%rdi), %ymm12

# qhasm: x7 = mem256[ input_0 + 1504 ]
# asm 1: vmovupd 1504(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1504(<input_0=%rdi),>x7=%ymm13
vmovupd 1504(%rdi), %ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6

# qhasm: mem256[ input_0 + 1280 ] = x0
# asm 1: vmovupd <x0=reg256#10,1280(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,1280(<input_0=%rdi)
vmovupd %ymm9, 1280(%rdi)

# qhasm: mem256[ input_0 + 1312 ] = x1
# asm 1: vmovupd <x1=reg256#14,1312(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,1312(<input_0=%rdi)
vmovupd %ymm13, 1312(%rdi)

# qhasm: mem256[ input_0 + 1344 ] = x2
# asm 1: vmovupd <x2=reg256#15,1344(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,1344(<input_0=%rdi)
vmovupd %ymm14, 1344(%rdi)

# qhasm: mem256[ input_0 + 1376 ] = x3
# asm 1: vmovupd <x3=reg256#11,1376(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,1376(<input_0=%rdi)
vmovupd %ymm10, 1376(%rdi)

# qhasm: mem256[ input_0 + 1408 ] = x4
# asm 1: vmovupd <x4=reg256#12,1408(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1408(<input_0=%rdi)
vmovupd %ymm11, 1408(%rdi)

# qhasm: mem256[ input_0 + 1440 ] = x5
# asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi)
vmovupd %ymm8, 1440(%rdi)

# qhasm: mem256[ input_0 + 1472 ] = x6
# asm 1: vmovupd <x6=reg256#13,1472(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1472(<input_0=%rdi)
vmovupd %ymm12, 1472(%rdi)

# qhasm: mem256[ input_0 + 1504 ] = x7
# asm 1: vmovupd <x7=reg256#7,1504(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1504(<input_0=%rdi)
vmovupd %ymm6, 1504(%rdi)

# qhasm: x0 = mem256[ input_0 + 1536 ]
# asm 1: vmovupd 1536(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 1536(<input_0=%rdi),>x0=%ymm6
vmovupd 1536(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 1568 ]
# asm 1: vmovupd 1568(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 1568(<input_0=%rdi),>x1=%ymm7
vmovupd 1568(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 1600 ]
# asm 1: vmovupd 1600(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 1600(<input_0=%rdi),>x2=%ymm8
vmovupd 1600(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 1632 ]
# asm 1: vmovupd 1632(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 1632(<input_0=%rdi),>x3=%ymm9
vmovupd 1632(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 1664 ]
# asm 1: vmovupd 1664(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1664(<input_0=%rdi),>x4=%ymm10
vmovupd 1664(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 1696 ]
# asm 1: vmovupd 1696(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1696(<input_0=%rdi),>x5=%ymm11
vmovupd 1696(%rdi), %ymm11

# qhasm: x6 = mem256[ input_0 + 1728 ]
# asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12
vmovupd 1728(%rdi), %ymm12

# qhasm: x7 = mem256[ input_0 + 1760 ]
# asm 1: vmovupd 1760(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1760(<input_0=%rdi),>x7=%ymm13
vmovupd 1760(%rdi), %ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6

# qhasm: mem256[ input_0 + 1536 ] = x0
# asm 1: vmovupd <x0=reg256#10,1536(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,1536(<input_0=%rdi)
vmovupd %ymm9, 1536(%rdi)

# qhasm: mem256[ input_0 + 1568 ] = x1
# asm 1: vmovupd <x1=reg256#14,1568(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,1568(<input_0=%rdi)
vmovupd %ymm13, 1568(%rdi)

# qhasm: mem256[ input_0 + 1600 ] = x2
# asm 1: vmovupd <x2=reg256#15,1600(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,1600(<input_0=%rdi)
vmovupd %ymm14, 1600(%rdi)

# qhasm: mem256[ input_0 + 1632 ] = x3
# asm 1: vmovupd <x3=reg256#11,1632(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,1632(<input_0=%rdi)
vmovupd %ymm10, 1632(%rdi)

# qhasm: mem256[ input_0 + 1664 ] = x4
# asm 1: vmovupd <x4=reg256#12,1664(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1664(<input_0=%rdi)
vmovupd %ymm11, 1664(%rdi)

# qhasm: mem256[ input_0 + 1696 ] = x5
# asm 1: vmovupd <x5=reg256#9,1696(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1696(<input_0=%rdi)
vmovupd %ymm8, 1696(%rdi)

# qhasm: mem256[ input_0 + 1728 ] = x6
# asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi)
vmovupd %ymm12, 1728(%rdi)

# qhasm: mem256[ input_0 + 1760 ] = x7
# asm 1: vmovupd <x7=reg256#7,1760(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1760(<input_0=%rdi)
vmovupd %ymm6, 1760(%rdi)

# qhasm: x0 = mem256[ input_0 + 1792 ]
# asm 1: vmovupd 1792(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 1792(<input_0=%rdi),>x0=%ymm6
vmovupd 1792(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 1824 ]
# asm 1: vmovupd 1824(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 1824(<input_0=%rdi),>x1=%ymm7
vmovupd 1824(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 1856 ]
# asm 1: vmovupd 1856(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 1856(<input_0=%rdi),>x2=%ymm8
vmovupd 1856(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 1888 ]
# asm 1: vmovupd 1888(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 1888(<input_0=%rdi),>x3=%ymm9
vmovupd 1888(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 1920 ]
# asm 1: vmovupd 1920(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1920(<input_0=%rdi),>x4=%ymm10
vmovupd 1920(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 1952 ]
# asm 1: vmovupd 1952(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1952(<input_0=%rdi),>x5=%ymm11
vmovupd 1952(%rdi), %ymm11

# qhasm: x6 = mem256[ input_0 + 1984 ]
# asm 1: vmovupd 1984(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1984(<input_0=%rdi),>x6=%ymm12
vmovupd 1984(%rdi), %ymm12

# qhasm: x7 = mem256[ input_0 + 2016 ]
# asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13
vmovupd 2016(%rdi), %ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#1
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm0
vpand %ymm13, %ymm0, %ymm0

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#1,<v10=reg256#1
# asm 2: vpsllq $4,<v10=%ymm0,<v10=%ymm0
vpsllq $4, %ymm0, %ymm0

# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1
vpand %ymm13, %ymm1, %ymm1

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#1,>x3=reg256#1
# asm 2: vpor <v00=%ymm12,<v10=%ymm0,>x3=%ymm0
vpor %ymm12, %ymm0, %ymm0

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1
vpor %ymm9, %ymm1, %ymm1

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9
vpand %ymm14, %ymm2, %ymm9

# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#13
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm12
vpand %ymm11, %ymm2, %ymm12

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#13,<v10=reg256#13
# asm 2: vpsllq $2,<v10=%ymm12,<v10=%ymm12
vpsllq $2, %ymm12, %ymm12

# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#14
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm13
vpand %ymm14, %ymm3, %ymm13

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $2,<v01=%ymm13,<v01=%ymm13
vpsrlq $2, %ymm13, %ymm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9
vpor %ymm9, %ymm12, %ymm9

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11
vpor %ymm13, %ymm11, %ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12
vpand %ymm10, %ymm2, %ymm12

# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#1,<mask2=reg256#3,>v10=reg256#14
# asm 2: vpand <x3=%ymm0,<mask2=%ymm2,>v10=%ymm13
vpand %ymm0, %ymm2, %ymm13

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14
# asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13
vpsllq $2, %ymm13, %ymm13

# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0
vpand %ymm0, %ymm3, %ymm0

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12
vpor %ymm12, %ymm13, %ymm12

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0
vpor %ymm10, %ymm0, %ymm0

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10
vpand %ymm6, %ymm2, %ymm10

# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#14
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm13
vpand %ymm8, %ymm2, %ymm13

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14
# asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13
vpsllq $2, %ymm13, %ymm13

# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10
vpor %ymm10, %ymm13, %ymm10

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#2,<mask2=reg256#3,>v10=reg256#3
# asm 2: vpand <x7=%ymm1,<mask2=%ymm2,>v10=%ymm2
vpand %ymm1, %ymm2, %ymm2

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#3,<v10=reg256#3
# asm 2: vpsllq $2,<v10=%ymm2,<v10=%ymm2
vpsllq $2, %ymm2, %ymm2

# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1
vpand %ymm1, %ymm3, %ymm1

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#3,>x5=reg256#3
# asm 2: vpor <v00=%ymm8,<v10=%ymm2,>x5=%ymm2
vpor %ymm8, %ymm2, %ymm2

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1
vpor %ymm7, %ymm1, %ymm1

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4
# asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3
vpand %ymm9, %ymm4, %ymm3

# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#13,<mask4=reg256#5,>v10=reg256#8
# asm 2: vpand <x1=%ymm12,<mask4=%ymm4,>v10=%ymm7
vpand %ymm12, %ymm4, %ymm7

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#8,<v10=reg256#8
# asm 2: vpsllq $1,<v10=%ymm7,<v10=%ymm7
vpsllq $1, %ymm7, %ymm7

# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#10,<mask5=reg256#6,>v01=reg256#9
# asm 2: vpand <x0=%ymm9,<mask5=%ymm5,>v01=%ymm8
vpand %ymm9, %ymm5, %ymm8

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10
# asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9
vpand %ymm12, %ymm5, %ymm9

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $1,<v01=%ymm8,<v01=%ymm8
vpsrlq $1, %ymm8, %ymm8

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4
# asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3
vpor %ymm3, %ymm7, %ymm3

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8
# asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7
vpor %ymm8, %ymm9, %ymm7

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8
vpand %ymm11, %ymm4, %ymm8

# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#1,<mask4=reg256#5,>v10=reg256#10
# asm 2: vpand <x3=%ymm0,<mask4=%ymm4,>v10=%ymm9
vpand %ymm0, %ymm4, %ymm9

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#10,<v10=reg256#10
# asm 2: vpsllq $1,<v10=%ymm9,<v10=%ymm9
vpsllq $1, %ymm9, %ymm9

# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0
vpand %ymm0, %ymm5, %ymm0

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8
vpor %ymm8, %ymm9, %ymm8

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0
vpor %ymm11, %ymm0, %ymm0

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9
vpand %ymm10, %ymm4, %ymm9

# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#3,<mask4=reg256#5,>v10=reg256#12
# asm 2: vpand <x5=%ymm2,<mask4=%ymm4,>v10=%ymm11
vpand %ymm2, %ymm4, %ymm11

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#12,<v10=reg256#12
# asm 2: vpsllq $1,<v10=%ymm11,<v10=%ymm11
vpsllq $1, %ymm11, %ymm11

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#11,<mask5=reg256#6,>v01=reg256#11
# asm 2: vpand <x4=%ymm10,<mask5=%ymm5,>v01=%ymm10
vpand %ymm10, %ymm5, %ymm10

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3
# asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2
vpand %ymm2, %ymm5, %ymm2

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $1,<v01=%ymm10,<v01=%ymm10
vpsrlq $1, %ymm10, %ymm10

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9
vpor %ymm9, %ymm11, %ymm9

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3
# asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2
vpor %ymm10, %ymm2, %ymm2

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#11
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm10
vpand %ymm6, %ymm4, %ymm10

# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#2,<mask4=reg256#5,>v10=reg256#5
# asm 2: vpand <x7=%ymm1,<mask4=%ymm4,>v10=%ymm4
vpand %ymm1, %ymm4, %ymm4

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#5,<v10=reg256#5
# asm 2: vpsllq $1,<v10=%ymm4,<v10=%ymm4
vpsllq $1, %ymm4, %ymm4

# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1
vpand %ymm1, %ymm5, %ymm1

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#5,>x6=reg256#5
# asm 2: vpor <v00=%ymm10,<v10=%ymm4,>x6=%ymm4
vpor %ymm10, %ymm4, %ymm4

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1
vpor %ymm6, %ymm1, %ymm1

# qhasm: mem256[ input_0 + 1792 ] = x0
# asm 1: vmovupd <x0=reg256#4,1792(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm3,1792(<input_0=%rdi)
vmovupd %ymm3, 1792(%rdi)

# qhasm: mem256[ input_0 + 1824 ] = x1
# asm 1: vmovupd <x1=reg256#8,1824(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm7,1824(<input_0=%rdi)
vmovupd %ymm7, 1824(%rdi)

# qhasm: mem256[ input_0 + 1856 ] = x2
# asm 1: vmovupd <x2=reg256#9,1856(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm8,1856(<input_0=%rdi)
vmovupd %ymm8, 1856(%rdi)

# qhasm: mem256[ input_0 + 1888 ] = x3
# asm 1: vmovupd <x3=reg256#1,1888(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm0,1888(<input_0=%rdi)
vmovupd %ymm0, 1888(%rdi)

# qhasm: mem256[ input_0 + 1920 ] = x4
# asm 1: vmovupd <x4=reg256#10,1920(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm9,1920(<input_0=%rdi)
vmovupd %ymm9, 1920(%rdi)

# qhasm: mem256[ input_0 + 1952 ] = x5
# asm 1: vmovupd <x5=reg256#3,1952(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm2,1952(<input_0=%rdi)
vmovupd %ymm2, 1952(%rdi)

# qhasm: mem256[ input_0 + 1984 ] = x6
# asm 1: vmovupd <x6=reg256#5,1984(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm4,1984(<input_0=%rdi)
vmovupd %ymm4, 1984(%rdi)

# qhasm: mem256[ input_0 + 2016 ] = x7
# asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi)
vmovupd %ymm1, 2016(%rdi)

# qhasm: return
add %r11, %rsp
ret
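Editor's note: the qhasm-generated AVX2 code above repeats one operation over and over, a butterfly step of an in-register bit-matrix transpose. Each four-line block masks out two interleaved groups of bits, shifts one group onto the other's positions, and ORs the halves back together; mask0/mask1 exchange 4-bit groups, mask2/mask3 2-bit groups, and mask4/mask5 single bits. Below is a minimal C sketch of a single such exchange step, assuming AVX2 intrinsics; butterfly4 and its parameter names are illustrative only and are not part of the PQClean sources.

#include <immintrin.h>

/* One butterfly step: x keeps the bits selected by mask0 and receives
   y's mask0 bits shifted up by 4; y receives x's mask1 bits shifted
   down by 4 and keeps its own mask1 bits.  This mirrors the qhasm
   pattern  v00 = x & mask0; v10 = (y & mask0) << 4;
            v01 = (x & mask1) >> 4; v11 = y & mask1;
            x = v00 | v10; y = v01 | v11.
   Hypothetical helper; mask0 would be the low-nibble mask and mask1
   its complement, as loaded elsewhere in the file. */
static inline void butterfly4(__m256i *x, __m256i *y,
                              __m256i mask0, __m256i mask1)
{
    __m256i v00 = _mm256_and_si256(*x, mask0);
    __m256i v10 = _mm256_slli_epi64(_mm256_and_si256(*y, mask0), 4);
    __m256i v01 = _mm256_srli_epi64(_mm256_and_si256(*x, mask1), 4);
    __m256i v11 = _mm256_and_si256(*y, mask1);
    *x = _mm256_or_si256(v00, v10);
    *y = _mm256_or_si256(v01, v11);
}

Applying this step with shift widths 4, 2, and 1 (and the matching mask pairs) to the loaded x0..x7 registers is exactly what each 256-byte block of the assembly above does before storing the results back.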
mktmansour/MKT-KSA-Geolocation-Security
76827
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128f/avx2/vec256_maa_asm.S
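# Overview (inferred from the vpand/vpxor pattern below; the qhasm-generated
# source is uncommented, so treat this as an editorial reading rather than
# upstream documentation): vec256_maa_asm appears to compute a bitsliced,
# 256-bit-vectorized multiply-and-accumulate over GF(2^13). Writing
#     a_i = mem256[ input_1 + 32*i ],  b_j = mem256[ input_2 + 32*j ]
# for i, j in 0..12, it forms the carry-less schoolbook product
#     r_k = XOR over all i + j == k of (a_i & b_j),   k = 0..24,
# using one vpand per partial product and one vpxor per accumulation.
# Interleaved with the rows, each completed high limb is folded back as
#     r_k -> r_{k-9}, r_{k-10}, r_{k-12}, r_{k-13}   (k = 24 down to 13),
# i.e. reduction modulo x^13 + x^4 + x^3 + x + 1: from x^13 = x^4 + x^3 + x + 1
# one gets x^k = x^{k-9} + x^{k-10} + x^{k-12} + x^{k-13} for k >= 13.
# The reduced limbs are then XORed into the destination at input_0 (the
# accumulate step visible in the r12 store near the end of this excerpt).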
#include "namespace.h" #define vec256_maa_asm CRYPTO_NAMESPACE(vec256_maa_asm) #define _vec256_maa_asm _CRYPTO_NAMESPACE(vec256_maa_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_maa_asm .p2align 5 .global _vec256_maa_asm .global vec256_maa_asm _vec256_maa_asm: vec256_maa_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: r12 = r12 ^ mem256[ input_0 + 384 ] # asm 1: vpxor 384(<input_0=int64#1),<r12=reg256#3,>r12=reg256#1 # asm 2: vpxor 384(<input_0=%rdi),<r12=%ymm2,>r12=%ymm0 vpxor 384( % rdi), % ymm2, % ymm0 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm0,384(<input_0=%rdi) vmovupd % ymm0, 384( % rdi) # qhasm: r12 = r12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#1,>r12=reg256#1 # asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm0,>r12=%ymm0 vpxor 384( % rsi), % 
ymm0, % ymm0 # qhasm: mem256[ input_1 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2) # asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi) vmovupd % ymm0, 384( % rsi) # qhasm: r11 = r11 ^ mem256[ input_0 + 352 ] # asm 1: vpxor 352(<input_0=int64#1),<r11=reg256#2,>r11=reg256#1 # asm 2: vpxor 352(<input_0=%rdi),<r11=%ymm1,>r11=%ymm0 vpxor 352( % rdi), % ymm1, % ymm0 # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm0,352(<input_0=%rdi) vmovupd % ymm0, 352( % rdi) # qhasm: r11 = r11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#1,>r11=reg256#1 # asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm0,>r11=%ymm0 vpxor 352( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2) # asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi) vmovupd % ymm0, 352( % rsi) # qhasm: r10 = r10 ^ mem256[ input_0 + 320 ] # asm 1: vpxor 320(<input_0=int64#1),<r10=reg256#14,>r10=reg256#1 # asm 2: vpxor 320(<input_0=%rdi),<r10=%ymm13,>r10=%ymm0 vpxor 320( % rdi), % ymm13, % ymm0 # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm0,320(<input_0=%rdi) vmovupd % ymm0, 320( % rdi) # qhasm: r10 = r10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#1,>r10=reg256#1 # asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm0,>r10=%ymm0 vpxor 320( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2) # asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi) vmovupd % ymm0, 320( % rsi) # qhasm: r9 = r9 ^ mem256[ input_0 + 288 ] # asm 1: vpxor 288(<input_0=int64#1),<r9=reg256#13,>r9=reg256#1 # asm 2: vpxor 288(<input_0=%rdi),<r9=%ymm12,>r9=%ymm0 vpxor 288( % rdi), % ymm12, % ymm0 # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm0,288(<input_0=%rdi) vmovupd % ymm0, 288( % rdi) # qhasm: r9 = r9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#1,>r9=reg256#1 # asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm0,>r9=%ymm0 vpxor 288( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2) # asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi) vmovupd % ymm0, 288( % rsi) # qhasm: r8 = r8 ^ mem256[ input_0 + 256 ] # asm 1: vpxor 256(<input_0=int64#1),<r8=reg256#12,>r8=reg256#1 # asm 2: vpxor 256(<input_0=%rdi),<r8=%ymm11,>r8=%ymm0 vpxor 256( % rdi), % ymm11, % ymm0 # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm0,256(<input_0=%rdi) vmovupd % ymm0, 256( % rdi) # qhasm: r8 = r8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#1,>r8=reg256#1 # asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm0,>r8=%ymm0 vpxor 256( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2) # asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi) vmovupd % ymm0, 256( % rsi) # qhasm: r7 = r7 ^ mem256[ input_0 + 224 ] # asm 1: vpxor 224(<input_0=int64#1),<r7=reg256#11,>r7=reg256#1 # asm 2: vpxor 224(<input_0=%rdi),<r7=%ymm10,>r7=%ymm0 vpxor 224( % rdi), % ymm10, % ymm0 # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm0,224(<input_0=%rdi) vmovupd % ymm0, 224( % rdi) # qhasm: r7 = r7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 
224(<input_1=int64#2),<r7=reg256#1,>r7=reg256#1 # asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm0,>r7=%ymm0 vpxor 224( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2) # asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi) vmovupd % ymm0, 224( % rsi) # qhasm: r6 = r6 ^ mem256[ input_0 + 192 ] # asm 1: vpxor 192(<input_0=int64#1),<r6=reg256#10,>r6=reg256#1 # asm 2: vpxor 192(<input_0=%rdi),<r6=%ymm9,>r6=%ymm0 vpxor 192( % rdi), % ymm9, % ymm0 # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm0,192(<input_0=%rdi) vmovupd % ymm0, 192( % rdi) # qhasm: r6 = r6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#1,>r6=reg256#1 # asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm0,>r6=%ymm0 vpxor 192( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2) # asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi) vmovupd % ymm0, 192( % rsi) # qhasm: r5 = r5 ^ mem256[ input_0 + 160 ] # asm 1: vpxor 160(<input_0=int64#1),<r5=reg256#9,>r5=reg256#1 # asm 2: vpxor 160(<input_0=%rdi),<r5=%ymm8,>r5=%ymm0 vpxor 160( % rdi), % ymm8, % ymm0 # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm0,160(<input_0=%rdi) vmovupd % ymm0, 160( % rdi) # qhasm: r5 = r5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#1,>r5=reg256#1 # asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm0,>r5=%ymm0 vpxor 160( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2) # asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi) vmovupd % ymm0, 160( % rsi) # qhasm: r4 = r4 ^ mem256[ input_0 + 128 ] # asm 1: vpxor 128(<input_0=int64#1),<r4=reg256#8,>r4=reg256#1 # asm 2: vpxor 128(<input_0=%rdi),<r4=%ymm7,>r4=%ymm0 vpxor 128( % rdi), % ymm7, % ymm0 # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm0,128(<input_0=%rdi) vmovupd % ymm0, 128( % rdi) # qhasm: r4 = r4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<r4=reg256#1,>r4=reg256#1 # asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm0,>r4=%ymm0 vpxor 128( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2) # asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi) vmovupd % ymm0, 128( % rsi) # qhasm: r3 = r3 ^ mem256[ input_0 + 96 ] # asm 1: vpxor 96(<input_0=int64#1),<r3=reg256#7,>r3=reg256#1 # asm 2: vpxor 96(<input_0=%rdi),<r3=%ymm6,>r3=%ymm0 vpxor 96( % rdi), % ymm6, % ymm0 # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm0,96(<input_0=%rdi) vmovupd % ymm0, 96( % rdi) # qhasm: r3 = r3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#1,>r3=reg256#1 # asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm0,>r3=%ymm0 vpxor 96( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2) # asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi) vmovupd % ymm0, 96( % rsi) # qhasm: r2 = r2 ^ mem256[ input_0 + 64 ] # asm 1: vpxor 64(<input_0=int64#1),<r2=reg256#6,>r2=reg256#1 # asm 2: vpxor 64(<input_0=%rdi),<r2=%ymm5,>r2=%ymm0 vpxor 64( % rdi), % ymm5, % ymm0 # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: r2 = r2 ^ mem256[ input_1 + 64 ] # asm 1: 
vpxor 64(<input_1=int64#2),<r2=reg256#1,>r2=reg256#1 # asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm0,>r2=%ymm0 vpxor 64( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2) # asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi) vmovupd % ymm0, 64( % rsi) # qhasm: r1 = r1 ^ mem256[ input_0 + 32 ] # asm 1: vpxor 32(<input_0=int64#1),<r1=reg256#5,>r1=reg256#1 # asm 2: vpxor 32(<input_0=%rdi),<r1=%ymm4,>r1=%ymm0 vpxor 32( % rdi), % ymm4, % ymm0 # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm0,32(<input_0=%rdi) vmovupd % ymm0, 32( % rdi) # qhasm: r1 = r1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#1,>r1=reg256#1 # asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm0,>r1=%ymm0 vpxor 32( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2) # asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi) vmovupd % ymm0, 32( % rsi) # qhasm: r0 = r0 ^ mem256[ input_0 + 0 ] # asm 1: vpxor 0(<input_0=int64#1),<r0=reg256#4,>r0=reg256#1 # asm 2: vpxor 0(<input_0=%rdi),<r0=%ymm3,>r0=%ymm0 vpxor 0( % rdi), % ymm3, % ymm0 # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm0,0(<input_0=%rdi) vmovupd % ymm0, 0( % rdi) # qhasm: r0 = r0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#1,>r0=reg256#1 # asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm0,>r0=%ymm0 vpxor 0( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2) # asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi) vmovupd % ymm0, 0( % rsi) # qhasm: return add % r11, % rsp ret
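The vpand/vpxor ladder that fills this file is a bitsliced, constant-time schoolbook multiplication over GF(2): each vpand multiplies one bit-plane of the first operand against one bit-plane of the second, each vpxor accumulates the partial product, and the trailing fold (r14 into r5/r4/r2/r1, r13 into r4/r3/r1/r0, and so on) reduces the high bit-planes by the field polynomial before the results are XORed into both output streams. The C below is an illustrative scalar model only, not the generated qhasm; the limb count GFBITS = 13 and the reduction x^13 = x^4 + x^3 + x + 1 are inferred from the 13-limb operand stride and the fold offsets visible above.

    /* Illustrative model of the bitsliced GF(2^13) multiply above.  Each
       uint64_t limb holds one bit-plane of 64 field elements, so a single
       AND/XOR pair multiplies 64 elements at once. */
    #include <stdint.h>
    #define GFBITS 13
    static void vec_mul_model(uint64_t h[GFBITS],
                              const uint64_t f[GFBITS],
                              const uint64_t g[GFBITS]) {
        uint64_t buf[2 * GFBITS - 1] = {0};
        for (int i = 0; i < GFBITS; i++)          /* buf[i+j] ^= f[i] & g[j] */
            for (int j = 0; j < GFBITS; j++)
                buf[i + j] ^= f[i] & g[j];
        for (int i = 2 * GFBITS - 2; i >= GFBITS; i--) {
            buf[i - GFBITS + 4] ^= buf[i];        /* x^13 = x^4 + x^3 + x + 1 */
            buf[i - GFBITS + 3] ^= buf[i];
            buf[i - GFBITS + 1] ^= buf[i];
            buf[i - GFBITS + 0] ^= buf[i];
        }
        for (int i = 0; i < GFBITS; i++) h[i] = buf[i];
    }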
mktmansour/MKT-KSA-Geolocation-Security
2,719
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-512/avx2/basemul.S
#include "cdecl.h" .macro schoolbook off vmovdqa _16XQINV*2(%rcx),%ymm0 vmovdqa (64*\off+ 0)*2(%rsi),%ymm1 # a0 vmovdqa (64*\off+16)*2(%rsi),%ymm2 # b0 vmovdqa (64*\off+32)*2(%rsi),%ymm3 # a1 vmovdqa (64*\off+48)*2(%rsi),%ymm4 # b1 vpmullw %ymm0,%ymm1,%ymm9 # a0.lo vpmullw %ymm0,%ymm2,%ymm10 # b0.lo vpmullw %ymm0,%ymm3,%ymm11 # a1.lo vpmullw %ymm0,%ymm4,%ymm12 # b1.lo vmovdqa (64*\off+ 0)*2(%rdx),%ymm5 # c0 vmovdqa (64*\off+16)*2(%rdx),%ymm6 # d0 vpmulhw %ymm5,%ymm1,%ymm13 # a0c0.hi vpmulhw %ymm6,%ymm1,%ymm1 # a0d0.hi vpmulhw %ymm5,%ymm2,%ymm14 # b0c0.hi vpmulhw %ymm6,%ymm2,%ymm2 # b0d0.hi vmovdqa (64*\off+32)*2(%rdx),%ymm7 # c1 vmovdqa (64*\off+48)*2(%rdx),%ymm8 # d1 vpmulhw %ymm7,%ymm3,%ymm15 # a1c1.hi vpmulhw %ymm8,%ymm3,%ymm3 # a1d1.hi vpmulhw %ymm7,%ymm4,%ymm0 # b1c1.hi vpmulhw %ymm8,%ymm4,%ymm4 # b1d1.hi vmovdqa %ymm13,(%rsp) vpmullw %ymm5,%ymm9,%ymm13 # a0c0.lo vpmullw %ymm6,%ymm9,%ymm9 # a0d0.lo vpmullw %ymm5,%ymm10,%ymm5 # b0c0.lo vpmullw %ymm6,%ymm10,%ymm10 # b0d0.lo vpmullw %ymm7,%ymm11,%ymm6 # a1c1.lo vpmullw %ymm8,%ymm11,%ymm11 # a1d1.lo vpmullw %ymm7,%ymm12,%ymm7 # b1c1.lo vpmullw %ymm8,%ymm12,%ymm12 # b1d1.lo vmovdqa _16XQ*2(%rcx),%ymm8 vpmulhw %ymm8,%ymm13,%ymm13 vpmulhw %ymm8,%ymm9,%ymm9 vpmulhw %ymm8,%ymm5,%ymm5 vpmulhw %ymm8,%ymm10,%ymm10 vpmulhw %ymm8,%ymm6,%ymm6 vpmulhw %ymm8,%ymm11,%ymm11 vpmulhw %ymm8,%ymm7,%ymm7 vpmulhw %ymm8,%ymm12,%ymm12 vpsubw (%rsp),%ymm13,%ymm13 # -a0c0 vpsubw %ymm9,%ymm1,%ymm9 # a0d0 vpsubw %ymm5,%ymm14,%ymm5 # b0c0 vpsubw %ymm10,%ymm2,%ymm10 # b0d0 vpsubw %ymm6,%ymm15,%ymm6 # a1c1 vpsubw %ymm11,%ymm3,%ymm11 # a1d1 vpsubw %ymm7,%ymm0,%ymm7 # b1c1 vpsubw %ymm12,%ymm4,%ymm12 # b1d1 vmovdqa (%r9),%ymm0 vmovdqa 32(%r9),%ymm1 vpmullw %ymm0,%ymm10,%ymm2 vpmullw %ymm0,%ymm12,%ymm3 vpmulhw %ymm1,%ymm10,%ymm10 vpmulhw %ymm1,%ymm12,%ymm12 vpmulhw %ymm8,%ymm2,%ymm2 vpmulhw %ymm8,%ymm3,%ymm3 vpsubw %ymm2,%ymm10,%ymm10 # rb0d0 vpsubw %ymm3,%ymm12,%ymm12 # rb1d1 vpaddw %ymm5,%ymm9,%ymm9 vpaddw %ymm7,%ymm11,%ymm11 vpsubw %ymm13,%ymm10,%ymm13 vpsubw %ymm12,%ymm6,%ymm6 vmovdqa %ymm13,(64*\off+ 0)*2(%rdi) vmovdqa %ymm9,(64*\off+16)*2(%rdi) vmovdqa %ymm6,(64*\off+32)*2(%rdi) vmovdqa %ymm11,(64*\off+48)*2(%rdi) .endm .text .global cdecl(PQCLEAN_MLKEM512_AVX2_basemul_avx) .global _cdecl(PQCLEAN_MLKEM512_AVX2_basemul_avx) cdecl(PQCLEAN_MLKEM512_AVX2_basemul_avx): _cdecl(PQCLEAN_MLKEM512_AVX2_basemul_avx): mov %rsp,%r8 and $-32,%rsp sub $32,%rsp lea (_ZETAS_EXP+176)*2(%rcx),%r9 schoolbook 0 add $32*2,%r9 schoolbook 1 add $192*2,%r9 schoolbook 2 add $32*2,%r9 schoolbook 3 mov %r8,%rsp ret
mktmansour/MKT-KSA-Geolocation-Security
4,676
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-512/avx2/shuffle.S
#include "cdecl.h" .include "fq.inc" .include "shuffle.inc" /* nttpack_avx: #load vmovdqa (%rdi),%ymm4 vmovdqa 32(%rdi),%ymm5 vmovdqa 64(%rdi),%ymm6 vmovdqa 96(%rdi),%ymm7 vmovdqa 128(%rdi),%ymm8 vmovdqa 160(%rdi),%ymm9 vmovdqa 192(%rdi),%ymm10 vmovdqa 224(%rdi),%ymm11 shuffle1 4,5,3,5 shuffle1 6,7,4,7 shuffle1 8,9,6,9 shuffle1 10,11,8,11 shuffle2 3,4,10,4 shuffle2 6,8,3,8 shuffle2 5,7,6,7 shuffle2 9,11,5,11 shuffle4 10,3,9,3 shuffle4 6,5,10,5 shuffle4 4,8,6,8 shuffle4 7,11,4,11 shuffle8 9,10,7,10 shuffle8 6,4,9,4 shuffle8 3,5,6,5 shuffle8 8,11,3,11 #store vmovdqa %ymm7,(%rdi) vmovdqa %ymm9,32(%rdi) vmovdqa %ymm6,64(%rdi) vmovdqa %ymm3,96(%rdi) vmovdqa %ymm10,128(%rdi) vmovdqa %ymm4,160(%rdi) vmovdqa %ymm5,192(%rdi) vmovdqa %ymm11,224(%rdi) ret */ .text nttunpack128_avx: #load vmovdqa (%rdi),%ymm4 vmovdqa 32(%rdi),%ymm5 vmovdqa 64(%rdi),%ymm6 vmovdqa 96(%rdi),%ymm7 vmovdqa 128(%rdi),%ymm8 vmovdqa 160(%rdi),%ymm9 vmovdqa 192(%rdi),%ymm10 vmovdqa 224(%rdi),%ymm11 shuffle8 4,8,3,8 shuffle8 5,9,4,9 shuffle8 6,10,5,10 shuffle8 7,11,6,11 shuffle4 3,5,7,5 shuffle4 8,10,3,10 shuffle4 4,6,8,6 shuffle4 9,11,4,11 shuffle2 7,8,9,8 shuffle2 5,6,7,6 shuffle2 3,4,5,4 shuffle2 10,11,3,11 shuffle1 9,5,10,5 shuffle1 8,4,9,4 shuffle1 7,3,8,3 shuffle1 6,11,7,11 #store vmovdqa %ymm10,(%rdi) vmovdqa %ymm5,32(%rdi) vmovdqa %ymm9,64(%rdi) vmovdqa %ymm4,96(%rdi) vmovdqa %ymm8,128(%rdi) vmovdqa %ymm3,160(%rdi) vmovdqa %ymm7,192(%rdi) vmovdqa %ymm11,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM512_AVX2_nttunpack_avx) .global _cdecl(PQCLEAN_MLKEM512_AVX2_nttunpack_avx) cdecl(PQCLEAN_MLKEM512_AVX2_nttunpack_avx): _cdecl(PQCLEAN_MLKEM512_AVX2_nttunpack_avx): call nttunpack128_avx add $256,%rdi call nttunpack128_avx ret ntttobytes128_avx: #load vmovdqa (%rsi),%ymm5 vmovdqa 32(%rsi),%ymm6 vmovdqa 64(%rsi),%ymm7 vmovdqa 96(%rsi),%ymm8 vmovdqa 128(%rsi),%ymm9 vmovdqa 160(%rsi),%ymm10 vmovdqa 192(%rsi),%ymm11 vmovdqa 224(%rsi),%ymm12 #csubq csubq 5,13 csubq 6,13 csubq 7,13 csubq 8,13 csubq 9,13 csubq 10,13 csubq 11,13 csubq 12,13 #bitpack vpsllw $12,%ymm6,%ymm4 vpor %ymm4,%ymm5,%ymm4 vpsrlw $4,%ymm6,%ymm5 vpsllw $8,%ymm7,%ymm6 vpor %ymm5,%ymm6,%ymm5 vpsrlw $8,%ymm7,%ymm6 vpsllw $4,%ymm8,%ymm7 vpor %ymm6,%ymm7,%ymm6 vpsllw $12,%ymm10,%ymm7 vpor %ymm7,%ymm9,%ymm7 vpsrlw $4,%ymm10,%ymm8 vpsllw $8,%ymm11,%ymm9 vpor %ymm8,%ymm9,%ymm8 vpsrlw $8,%ymm11,%ymm9 vpsllw $4,%ymm12,%ymm10 vpor %ymm9,%ymm10,%ymm9 shuffle1 4,5,3,5 shuffle1 6,7,4,7 shuffle1 8,9,6,9 shuffle2 3,4,8,4 shuffle2 6,5,3,5 shuffle2 7,9,6,9 shuffle4 8,3,7,3 shuffle4 6,4,8,4 shuffle4 5,9,6,9 shuffle8 7,8,5,8 shuffle8 6,3,7,3 shuffle8 4,9,6,9 #store vmovdqu %ymm5,(%rdi) vmovdqu %ymm7,32(%rdi) vmovdqu %ymm6,64(%rdi) vmovdqu %ymm8,96(%rdi) vmovdqu %ymm3,128(%rdi) vmovdqu %ymm9,160(%rdi) ret .global cdecl(PQCLEAN_MLKEM512_AVX2_ntttobytes_avx) .global _cdecl(PQCLEAN_MLKEM512_AVX2_ntttobytes_avx) cdecl(PQCLEAN_MLKEM512_AVX2_ntttobytes_avx): _cdecl(PQCLEAN_MLKEM512_AVX2_ntttobytes_avx): #consts vmovdqa _16XQ*2(%rdx),%ymm0 call ntttobytes128_avx add $256,%rsi add $192,%rdi call ntttobytes128_avx ret nttfrombytes128_avx: #load vmovdqu (%rsi),%ymm4 vmovdqu 32(%rsi),%ymm5 vmovdqu 64(%rsi),%ymm6 vmovdqu 96(%rsi),%ymm7 vmovdqu 128(%rsi),%ymm8 vmovdqu 160(%rsi),%ymm9 shuffle8 4,7,3,7 shuffle8 5,8,4,8 shuffle8 6,9,5,9 shuffle4 3,8,6,8 shuffle4 7,5,3,5 shuffle4 4,9,7,9 shuffle2 6,5,4,5 shuffle2 8,7,6,7 shuffle2 3,9,8,9 shuffle1 4,7,10,7 shuffle1 5,8,4,8 shuffle1 6,9,5,9 #bitunpack vpsrlw $12,%ymm10,%ymm11 vpsllw $4,%ymm7,%ymm12 vpor %ymm11,%ymm12,%ymm11 vpand %ymm0,%ymm10,%ymm10 vpand 
%ymm0,%ymm11,%ymm11 vpsrlw $8,%ymm7,%ymm12 vpsllw $8,%ymm4,%ymm13 vpor %ymm12,%ymm13,%ymm12 vpand %ymm0,%ymm12,%ymm12 vpsrlw $4,%ymm4,%ymm13 vpand %ymm0,%ymm13,%ymm13 vpsrlw $12,%ymm8,%ymm14 vpsllw $4,%ymm5,%ymm15 vpor %ymm14,%ymm15,%ymm14 vpand %ymm0,%ymm8,%ymm8 vpand %ymm0,%ymm14,%ymm14 vpsrlw $8,%ymm5,%ymm15 vpsllw $8,%ymm9,%ymm1 vpor %ymm15,%ymm1,%ymm15 vpand %ymm0,%ymm15,%ymm15 vpsrlw $4,%ymm9,%ymm1 vpand %ymm0,%ymm1,%ymm1 #store vmovdqa %ymm10,(%rdi) vmovdqa %ymm11,32(%rdi) vmovdqa %ymm12,64(%rdi) vmovdqa %ymm13,96(%rdi) vmovdqa %ymm8,128(%rdi) vmovdqa %ymm14,160(%rdi) vmovdqa %ymm15,192(%rdi) vmovdqa %ymm1,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM512_AVX2_nttfrombytes_avx) .global _cdecl(PQCLEAN_MLKEM512_AVX2_nttfrombytes_avx) cdecl(PQCLEAN_MLKEM512_AVX2_nttfrombytes_avx): _cdecl(PQCLEAN_MLKEM512_AVX2_nttfrombytes_avx): #consts vmovdqa _16XMASK*2(%rdx),%ymm0 call nttfrombytes128_avx add $256,%rdi add $192,%rsi call nttfrombytes128_avx ret
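ntttobytes/nttfrombytes (de)serialize 12-bit coefficients; the shuffle ladders only reorder SIMD lanes, and csubq first brings each coefficient into [0, q). Per coefficient pair the packing is the standard reference layout, sketched here as plain C:

    #include <stdint.h>
    /* Two 12-bit coefficients -> three bytes (poly_tobytes layout). */
    static void pack2(uint8_t r[3], uint16_t t0, uint16_t t1) {
        r[0] = (uint8_t)t0;
        r[1] = (uint8_t)((t0 >> 8) | (t1 << 4));
        r[2] = (uint8_t)(t1 >> 4);
    }
    /* Inverse, as in nttfrombytes; 0xFFF is the _16XMASK constant. */
    static void unpack2(uint16_t t[2], const uint8_t b[3]) {
        t[0] = (uint16_t)((b[0] | ((uint16_t)b[1] << 8)) & 0xFFF);
        t[1] = (uint16_t)(((b[1] >> 4) | ((uint16_t)b[2] << 4)) & 0xFFF);
    }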
mktmansour/MKT-KSA-Geolocation-Security
1,797
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-512/avx2/fq.S
#include "cdecl.h" .include "fq.inc" .text reduce128_avx: #load vmovdqa (%rdi),%ymm2 vmovdqa 32(%rdi),%ymm3 vmovdqa 64(%rdi),%ymm4 vmovdqa 96(%rdi),%ymm5 vmovdqa 128(%rdi),%ymm6 vmovdqa 160(%rdi),%ymm7 vmovdqa 192(%rdi),%ymm8 vmovdqa 224(%rdi),%ymm9 red16 2 red16 3 red16 4 red16 5 red16 6 red16 7 red16 8 red16 9 #store vmovdqa %ymm2,(%rdi) vmovdqa %ymm3,32(%rdi) vmovdqa %ymm4,64(%rdi) vmovdqa %ymm5,96(%rdi) vmovdqa %ymm6,128(%rdi) vmovdqa %ymm7,160(%rdi) vmovdqa %ymm8,192(%rdi) vmovdqa %ymm9,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM512_AVX2_reduce_avx) .global _cdecl(PQCLEAN_MLKEM512_AVX2_reduce_avx) cdecl(PQCLEAN_MLKEM512_AVX2_reduce_avx): _cdecl(PQCLEAN_MLKEM512_AVX2_reduce_avx): #consts vmovdqa _16XQ*2(%rsi),%ymm0 vmovdqa _16XV*2(%rsi),%ymm1 call reduce128_avx add $256,%rdi call reduce128_avx ret tomont128_avx: #load vmovdqa (%rdi),%ymm3 vmovdqa 32(%rdi),%ymm4 vmovdqa 64(%rdi),%ymm5 vmovdqa 96(%rdi),%ymm6 vmovdqa 128(%rdi),%ymm7 vmovdqa 160(%rdi),%ymm8 vmovdqa 192(%rdi),%ymm9 vmovdqa 224(%rdi),%ymm10 fqmulprecomp 1,2,3,11 fqmulprecomp 1,2,4,12 fqmulprecomp 1,2,5,13 fqmulprecomp 1,2,6,14 fqmulprecomp 1,2,7,15 fqmulprecomp 1,2,8,11 fqmulprecomp 1,2,9,12 fqmulprecomp 1,2,10,13 #store vmovdqa %ymm3,(%rdi) vmovdqa %ymm4,32(%rdi) vmovdqa %ymm5,64(%rdi) vmovdqa %ymm6,96(%rdi) vmovdqa %ymm7,128(%rdi) vmovdqa %ymm8,160(%rdi) vmovdqa %ymm9,192(%rdi) vmovdqa %ymm10,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM512_AVX2_tomont_avx) .global _cdecl(PQCLEAN_MLKEM512_AVX2_tomont_avx) cdecl(PQCLEAN_MLKEM512_AVX2_tomont_avx): _cdecl(PQCLEAN_MLKEM512_AVX2_tomont_avx): #consts vmovdqa _16XQ*2(%rsi),%ymm0 vmovdqa _16XMONTSQLO*2(%rsi),%ymm1 vmovdqa _16XMONTSQHI*2(%rsi),%ymm2 call tomont128_avx add $256,%rdi call tomont128_avx ret
mktmansour/MKT-KSA-Geolocation-Security
4,178
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-512/avx2/ntt.S
#include "cdecl.h" .include "shuffle.inc" .macro mul rh0,rh1,rh2,rh3,zl0=15,zl1=15,zh0=2,zh1=2 vpmullw %ymm\zl0,%ymm\rh0,%ymm12 vpmullw %ymm\zl0,%ymm\rh1,%ymm13 vpmullw %ymm\zl1,%ymm\rh2,%ymm14 vpmullw %ymm\zl1,%ymm\rh3,%ymm15 vpmulhw %ymm\zh0,%ymm\rh0,%ymm\rh0 vpmulhw %ymm\zh0,%ymm\rh1,%ymm\rh1 vpmulhw %ymm\zh1,%ymm\rh2,%ymm\rh2 vpmulhw %ymm\zh1,%ymm\rh3,%ymm\rh3 .endm .macro reduce vpmulhw %ymm0,%ymm12,%ymm12 vpmulhw %ymm0,%ymm13,%ymm13 vpmulhw %ymm0,%ymm14,%ymm14 vpmulhw %ymm0,%ymm15,%ymm15 .endm .macro update rln,rl0,rl1,rl2,rl3,rh0,rh1,rh2,rh3 vpaddw %ymm\rh0,%ymm\rl0,%ymm\rln vpsubw %ymm\rh0,%ymm\rl0,%ymm\rh0 vpaddw %ymm\rh1,%ymm\rl1,%ymm\rl0 vpsubw %ymm\rh1,%ymm\rl1,%ymm\rh1 vpaddw %ymm\rh2,%ymm\rl2,%ymm\rl1 vpsubw %ymm\rh2,%ymm\rl2,%ymm\rh2 vpaddw %ymm\rh3,%ymm\rl3,%ymm\rl2 vpsubw %ymm\rh3,%ymm\rl3,%ymm\rh3 vpsubw %ymm12,%ymm\rln,%ymm\rln vpaddw %ymm12,%ymm\rh0,%ymm\rh0 vpsubw %ymm13,%ymm\rl0,%ymm\rl0 vpaddw %ymm13,%ymm\rh1,%ymm\rh1 vpsubw %ymm14,%ymm\rl1,%ymm\rl1 vpaddw %ymm14,%ymm\rh2,%ymm\rh2 vpsubw %ymm15,%ymm\rl2,%ymm\rl2 vpaddw %ymm15,%ymm\rh3,%ymm\rh3 .endm .macro level0 off vpbroadcastq (_ZETAS_EXP+0)*2(%rsi),%ymm15 vmovdqa (64*\off+128)*2(%rdi),%ymm8 vmovdqa (64*\off+144)*2(%rdi),%ymm9 vmovdqa (64*\off+160)*2(%rdi),%ymm10 vmovdqa (64*\off+176)*2(%rdi),%ymm11 vpbroadcastq (_ZETAS_EXP+4)*2(%rsi),%ymm2 mul 8,9,10,11 vmovdqa (64*\off+ 0)*2(%rdi),%ymm4 vmovdqa (64*\off+ 16)*2(%rdi),%ymm5 vmovdqa (64*\off+ 32)*2(%rdi),%ymm6 vmovdqa (64*\off+ 48)*2(%rdi),%ymm7 reduce update 3,4,5,6,7,8,9,10,11 vmovdqa %ymm3,(64*\off+ 0)*2(%rdi) vmovdqa %ymm4,(64*\off+ 16)*2(%rdi) vmovdqa %ymm5,(64*\off+ 32)*2(%rdi) vmovdqa %ymm6,(64*\off+ 48)*2(%rdi) vmovdqa %ymm8,(64*\off+128)*2(%rdi) vmovdqa %ymm9,(64*\off+144)*2(%rdi) vmovdqa %ymm10,(64*\off+160)*2(%rdi) vmovdqa %ymm11,(64*\off+176)*2(%rdi) .endm .macro levels1t6 off /* level 1 */ vmovdqa (_ZETAS_EXP+224*\off+16)*2(%rsi),%ymm15 vmovdqa (128*\off+ 64)*2(%rdi),%ymm8 vmovdqa (128*\off+ 80)*2(%rdi),%ymm9 vmovdqa (128*\off+ 96)*2(%rdi),%ymm10 vmovdqa (128*\off+112)*2(%rdi),%ymm11 vmovdqa (_ZETAS_EXP+224*\off+32)*2(%rsi),%ymm2 mul 8,9,10,11 vmovdqa (128*\off+ 0)*2(%rdi),%ymm4 vmovdqa (128*\off+ 16)*2(%rdi),%ymm5 vmovdqa (128*\off+ 32)*2(%rdi),%ymm6 vmovdqa (128*\off+ 48)*2(%rdi),%ymm7 reduce update 3,4,5,6,7,8,9,10,11 /* level 2 */ shuffle8 5,10,7,10 shuffle8 6,11,5,11 vmovdqa (_ZETAS_EXP+224*\off+48)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+64)*2(%rsi),%ymm2 mul 7,10,5,11 shuffle8 3,8,6,8 shuffle8 4,9,3,9 reduce update 4,6,8,3,9,7,10,5,11 /* level 3 */ shuffle4 8,5,9,5 shuffle4 3,11,8,11 vmovdqa (_ZETAS_EXP+224*\off+80)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+96)*2(%rsi),%ymm2 mul 9,5,8,11 shuffle4 4,7,3,7 shuffle4 6,10,4,10 reduce update 6,3,7,4,10,9,5,8,11 /* level 4 */ shuffle2 7,8,10,8 shuffle2 4,11,7,11 vmovdqa (_ZETAS_EXP+224*\off+112)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+128)*2(%rsi),%ymm2 mul 10,8,7,11 shuffle2 6,9,4,9 shuffle2 3,5,6,5 reduce update 3,4,9,6,5,10,8,7,11 /* level 5 */ shuffle1 9,7,5,7 shuffle1 6,11,9,11 vmovdqa (_ZETAS_EXP+224*\off+144)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+160)*2(%rsi),%ymm2 mul 5,7,9,11 shuffle1 3,10,6,10 shuffle1 4,8,3,8 reduce update 4,6,10,3,8,5,7,9,11 /* level 6 */ vmovdqa (_ZETAS_EXP+224*\off+176)*2(%rsi),%ymm14 vmovdqa (_ZETAS_EXP+224*\off+208)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+192)*2(%rsi),%ymm8 vmovdqa (_ZETAS_EXP+224*\off+224)*2(%rsi),%ymm2 mul 10,3,9,11,14,15,8,2 reduce update 8,4,6,5,7,10,3,9,11 vmovdqa %ymm8,(128*\off+ 0)*2(%rdi) vmovdqa %ymm4,(128*\off+ 16)*2(%rdi) 
vmovdqa %ymm10,(128*\off+ 32)*2(%rdi) vmovdqa %ymm3,(128*\off+ 48)*2(%rdi) vmovdqa %ymm6,(128*\off+ 64)*2(%rdi) vmovdqa %ymm5,(128*\off+ 80)*2(%rdi) vmovdqa %ymm9,(128*\off+ 96)*2(%rdi) vmovdqa %ymm11,(128*\off+112)*2(%rdi) .endm .text .global cdecl(PQCLEAN_MLKEM512_AVX2_ntt_avx) .global _cdecl(PQCLEAN_MLKEM512_AVX2_ntt_avx) cdecl(PQCLEAN_MLKEM512_AVX2_ntt_avx): _cdecl(PQCLEAN_MLKEM512_AVX2_ntt_avx): vmovdqa _16XQ*2(%rsi),%ymm0 level0 0 level0 1 levels1t6 0 levels1t6 1 ret
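level0 performs the len = 128 split of the forward NTT; levels1t6 keeps a 128-coefficient half entirely in registers and re-interleaves lanes with the shuffleN macros between levels, so the mul/reduce/update triple is one vectorized Cooley-Tukey butterfly. The scalar model below follows the reference implementation that this kernel mirrors (the zetas table and fqmul are assumed to come from that reference code, as in the basemul sketch above):

    #include <stdint.h>
    extern const int16_t zetas[128];        /* precomputed twiddle table */
    int16_t fqmul(int16_t a, int16_t b);    /* Montgomery multiplication */

    /* Forward NTT: 7 levels of CT butterflies, len = 128 down to 2. */
    static void ntt_model(int16_t r[256]) {
        unsigned k = 1;
        for (unsigned len = 128; len >= 2; len >>= 1)
            for (unsigned start = 0; start < 256; start += 2 * len) {
                int16_t zeta = zetas[k++];
                for (unsigned j = start; j < start + len; j++) {
                    int16_t t  = fqmul(zeta, r[j + len]);   /* mul + reduce */
                    r[j + len] = (int16_t)(r[j] - t);       /* update       */
                    r[j]       = (int16_t)(r[j] + t);
                }
            }
    }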
mktmansour/MKT-KSA-Geolocation-Security
4,787
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-512/avx2/invntt.S
#include "cdecl.h" .include "shuffle.inc" .include "fq.inc" .macro butterfly rl0,rl1,rl2,rl3,rh0,rh1,rh2,rh3,zl0=2,zl1=2,zh0=3,zh1=3 vpsubw %ymm\rl0,%ymm\rh0,%ymm12 vpaddw %ymm\rh0,%ymm\rl0,%ymm\rl0 vpsubw %ymm\rl1,%ymm\rh1,%ymm13 vpmullw %ymm\zl0,%ymm12,%ymm\rh0 vpaddw %ymm\rh1,%ymm\rl1,%ymm\rl1 vpsubw %ymm\rl2,%ymm\rh2,%ymm14 vpmullw %ymm\zl0,%ymm13,%ymm\rh1 vpaddw %ymm\rh2,%ymm\rl2,%ymm\rl2 vpsubw %ymm\rl3,%ymm\rh3,%ymm15 vpmullw %ymm\zl1,%ymm14,%ymm\rh2 vpaddw %ymm\rh3,%ymm\rl3,%ymm\rl3 vpmullw %ymm\zl1,%ymm15,%ymm\rh3 vpmulhw %ymm\zh0,%ymm12,%ymm12 vpmulhw %ymm\zh0,%ymm13,%ymm13 vpmulhw %ymm\zh1,%ymm14,%ymm14 vpmulhw %ymm\zh1,%ymm15,%ymm15 vpmulhw %ymm0,%ymm\rh0,%ymm\rh0 vpmulhw %ymm0,%ymm\rh1,%ymm\rh1 vpmulhw %ymm0,%ymm\rh2,%ymm\rh2 vpmulhw %ymm0,%ymm\rh3,%ymm\rh3 # # vpsubw %ymm\rh0,%ymm12,%ymm\rh0 vpsubw %ymm\rh1,%ymm13,%ymm\rh1 vpsubw %ymm\rh2,%ymm14,%ymm\rh2 vpsubw %ymm\rh3,%ymm15,%ymm\rh3 .endm .macro intt_levels0t5 off /* level 0 */ vmovdqa _16XFLO*2(%rsi),%ymm2 vmovdqa _16XFHI*2(%rsi),%ymm3 vmovdqa (128*\off+ 0)*2(%rdi),%ymm4 vmovdqa (128*\off+ 32)*2(%rdi),%ymm6 vmovdqa (128*\off+ 16)*2(%rdi),%ymm5 vmovdqa (128*\off+ 48)*2(%rdi),%ymm7 fqmulprecomp 2,3,4 fqmulprecomp 2,3,6 fqmulprecomp 2,3,5 fqmulprecomp 2,3,7 vmovdqa (128*\off+ 64)*2(%rdi),%ymm8 vmovdqa (128*\off+ 96)*2(%rdi),%ymm10 vmovdqa (128*\off+ 80)*2(%rdi),%ymm9 vmovdqa (128*\off+112)*2(%rdi),%ymm11 fqmulprecomp 2,3,8 fqmulprecomp 2,3,10 fqmulprecomp 2,3,9 fqmulprecomp 2,3,11 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+208)*2(%rsi),%ymm15 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+176)*2(%rsi),%ymm1 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+224)*2(%rsi),%ymm2 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+192)*2(%rsi),%ymm3 vmovdqa _REVIDXB*2(%rsi),%ymm12 vpshufb %ymm12,%ymm15,%ymm15 vpshufb %ymm12,%ymm1,%ymm1 vpshufb %ymm12,%ymm2,%ymm2 vpshufb %ymm12,%ymm3,%ymm3 butterfly 4,5,8,9,6,7,10,11,15,1,2,3 /* level 1 */ vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+144)*2(%rsi),%ymm2 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+160)*2(%rsi),%ymm3 vmovdqa _REVIDXB*2(%rsi),%ymm1 vpshufb %ymm1,%ymm2,%ymm2 vpshufb %ymm1,%ymm3,%ymm3 butterfly 4,5,6,7,8,9,10,11,2,2,3,3 shuffle1 4,5,3,5 shuffle1 6,7,4,7 shuffle1 8,9,6,9 shuffle1 10,11,8,11 /* level 2 */ vmovdqa _REVIDXD*2(%rsi),%ymm12 vpermd (_ZETAS_EXP+(1-\off)*224+112)*2(%rsi),%ymm12,%ymm2 vpermd (_ZETAS_EXP+(1-\off)*224+128)*2(%rsi),%ymm12,%ymm10 butterfly 3,4,6,8,5,7,9,11,2,2,10,10 vmovdqa _16XV*2(%rsi),%ymm1 red16 3 shuffle2 3,4,10,4 shuffle2 6,8,3,8 shuffle2 5,7,6,7 shuffle2 9,11,5,11 /* level 3 */ vpermq $0x1B,(_ZETAS_EXP+(1-\off)*224+80)*2(%rsi),%ymm2 vpermq $0x1B,(_ZETAS_EXP+(1-\off)*224+96)*2(%rsi),%ymm9 butterfly 10,3,6,5,4,8,7,11,2,2,9,9 shuffle4 10,3,9,3 shuffle4 6,5,10,5 shuffle4 4,8,6,8 shuffle4 7,11,4,11 /* level 4 */ vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+48)*2(%rsi),%ymm2 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+64)*2(%rsi),%ymm7 butterfly 9,10,6,4,3,5,8,11,2,2,7,7 red16 9 shuffle8 9,10,7,10 shuffle8 6,4,9,4 shuffle8 3,5,6,5 shuffle8 8,11,3,11 /* level 5 */ vmovdqa (_ZETAS_EXP+(1-\off)*224+16)*2(%rsi),%ymm2 vmovdqa (_ZETAS_EXP+(1-\off)*224+32)*2(%rsi),%ymm8 butterfly 7,9,6,3,10,4,5,11,2,2,8,8 vmovdqa %ymm7,(128*\off+ 0)*2(%rdi) vmovdqa %ymm9,(128*\off+ 16)*2(%rdi) vmovdqa %ymm6,(128*\off+ 32)*2(%rdi) vmovdqa %ymm3,(128*\off+ 48)*2(%rdi) vmovdqa %ymm10,(128*\off+ 64)*2(%rdi) vmovdqa %ymm4,(128*\off+ 80)*2(%rdi) vmovdqa %ymm5,(128*\off+ 96)*2(%rdi) vmovdqa %ymm11,(128*\off+112)*2(%rdi) .endm .macro intt_level6 off /* level 6 */ vmovdqa (64*\off+ 0)*2(%rdi),%ymm4 vmovdqa (64*\off+128)*2(%rdi),%ymm8 vmovdqa 
(64*\off+ 16)*2(%rdi),%ymm5 vmovdqa (64*\off+144)*2(%rdi),%ymm9 vpbroadcastq (_ZETAS_EXP+0)*2(%rsi),%ymm2 vmovdqa (64*\off+ 32)*2(%rdi),%ymm6 vmovdqa (64*\off+160)*2(%rdi),%ymm10 vmovdqa (64*\off+ 48)*2(%rdi),%ymm7 vmovdqa (64*\off+176)*2(%rdi),%ymm11 vpbroadcastq (_ZETAS_EXP+4)*2(%rsi),%ymm3 butterfly 4,5,6,7,8,9,10,11 .if \off == 0 red16 4 .endif vmovdqa %ymm4,(64*\off+ 0)*2(%rdi) vmovdqa %ymm5,(64*\off+ 16)*2(%rdi) vmovdqa %ymm6,(64*\off+ 32)*2(%rdi) vmovdqa %ymm7,(64*\off+ 48)*2(%rdi) vmovdqa %ymm8,(64*\off+128)*2(%rdi) vmovdqa %ymm9,(64*\off+144)*2(%rdi) vmovdqa %ymm10,(64*\off+160)*2(%rdi) vmovdqa %ymm11,(64*\off+176)*2(%rdi) .endm .text .global cdecl(PQCLEAN_MLKEM512_AVX2_invntt_avx) .global _cdecl(PQCLEAN_MLKEM512_AVX2_invntt_avx) cdecl(PQCLEAN_MLKEM512_AVX2_invntt_avx): _cdecl(PQCLEAN_MLKEM512_AVX2_invntt_avx): vmovdqa _16XQ*2(%rsi),%ymm0 intt_levels0t5 0 intt_levels0t5 1 intt_level6 0 intt_level6 1 ret
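Here the butterfly macro is the Gentleman-Sande form (add/subtract first, then multiply the difference by zeta), with red16 sprinkled in at levels 2 and 4 to keep coefficients in range; the _16XFLO/_16XFHI pair applies the final 1/128 scaling up front rather than at the end. A scalar sketch following the reference invntt (fqmul, barrett_reduce and zetas assumed from the earlier sketches):

    #include <stdint.h>
    extern const int16_t zetas[128];
    int16_t fqmul(int16_t a, int16_t b);
    int16_t barrett_reduce(int16_t a);

    /* Inverse NTT: GS butterflies, then scaling by f = 1441 = mont^2/128
       mod q (the reference applies f last; the AVX2 code applies it first). */
    static void invntt_model(int16_t r[256]) {
        const int16_t f = 1441;
        unsigned k = 127;
        for (unsigned len = 2; len <= 128; len <<= 1)
            for (unsigned start = 0; start < 256; start += 2 * len) {
                int16_t zeta = zetas[k--];
                for (unsigned j = start; j < start + len; j++) {
                    int16_t t  = r[j];
                    r[j]       = barrett_reduce((int16_t)(t + r[j + len]));
                    r[j + len] = (int16_t)(r[j + len] - t);
                    r[j + len] = fqmul(zeta, r[j + len]);
                }
            }
        for (unsigned j = 0; j < 256; j++)
            r[j] = fqmul(r[j], f);
    }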
mktmansour/MKT-KSA-Geolocation-Security
12,888
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-512/aarch64/__asm_iNTT.S
/*
 * We offer
 * CC0 1.0 Universal or the following MIT License for this file.
 * You may freely choose one of them that applies.
 *
 * MIT License
 *
 * Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
 * Copyright (c) 2023: Vincent Hwang
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "macros.inc"

.align 2
.global PQCLEAN_MLKEM512_AARCH64__asm_intt_SIMD_bot
.global _PQCLEAN_MLKEM512_AARCH64__asm_intt_SIMD_bot
PQCLEAN_MLKEM512_AARCH64__asm_intt_SIMD_bot:
_PQCLEAN_MLKEM512_AARCH64__asm_intt_SIMD_bot:

push_all

Q       .req w20
BarrettM .req w21
src0    .req x0
src1    .req x1
table   .req x28
counter .req x19

ldrsh Q, [x2, #0]
ldrsh BarrettM, [x2, #8]

add table, x1, #64

add src0, x0, #256*0
add src1, x0, #256*1

mov v0.H[0], Q
mov v0.H[1], BarrettM

ldr q28, [src0, #1*16]
ldr q29, [src1, #1*16]
ldr q30, [src0, #3*16]
ldr q31, [src1, #3*16]

trn_4x4_2l4 v28, v29, v30, v31, v20, v21, v22, v23, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16

trn_4x4_2l4 v24, v25, v26, v27, v20, v21, v22, v23, table, table, q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16

do_butterfly_vec_bot v28, v30, v18, v19, v29, v31, v0, v12, v13, v14, v15

do_butterfly_vec_mix_rev_l4 \
        v18, v19, v29, v31, \
        v24, v26, v16, v17, v25, v27, v0, v12, v13, v14, v15, \
        table, \
        q8, q9, q10, q11, \
        #8*16, #9*16, #10*16, #11*16

do_butterfly_vec_mix_rev_l4 \
        v16, v17, v25, v27, \
        v28, v29, v18, v19, v30, v31, v0, v8, v9, v10, v11, \
        table, \
        q4, q5, q6, q7, \
        #4*16, #5*16, #6*16, #7*16

do_butterfly_vec_mix_rev_l3 \
        v18, v19, v30, v31, \
        v24, v25, v16, v17, v26, v27, v0, v6, v7, v6, v7, \
        table, \
        q1, q2, q3, \
        #1*16, #2*16, #3*16

do_butterfly_vec_mix_rev v24, v25, v16, v17, v26, v27, v24, v25, v18, v19, v28, v29, v0, v4, v5, v4, v5, v2, v3, v2, v3
do_butterfly_vec_mix_rev v24, v25, v18, v19, v28, v29, v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3, v2, v3, v2, v3
do_butterfly_vec_top v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3

oo_barrett v24, v25, v26, v27, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, #11, v0

add table, table, #256

trn_4x4 v28, v29, v30, v31, v16, v17, v18, v19

trn_4x4_2s4 v24, v25, v26, v27, v16, v17, v18, v19, src0, src1, q28, q29, q30, q31, #1*16, #1*16, #3*16, #3*16

mov counter, #3
_intt_bot_loop:

str q24, [src0, #0*16]
ldr q28, [src0, #(64+1*16)]
str q25, [src1, #0*16]
ldr q29, [src1, #(64+1*16)]
str q26, [src0, #2*16]
ldr q30, [src0, #(64+3*16)]
str q27, [src1, #2*16]
ldr q31, [src1, #(64+3*16)]

add src0, src0, #64
add src1, src1, #64

trn_4x4_2l4 v28, v29, v30, v31, v20, v21, v22, v23, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16

trn_4x4_2l4 v24, v25, v26, v27, v20, v21, v22, v23, table, table, q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16

do_butterfly_vec_bot v28, v30, v18, v19, v29, v31, v0, v12, v13, v14, v15

do_butterfly_vec_mix_rev_l4 \
        v18, v19, v29, v31, \
        v24, v26, v16, v17, v25, v27, v0, v12, v13, v14, v15, \
        table, \
        q8, q9, q10, q11, \
        #8*16, #9*16, #10*16, #11*16

do_butterfly_vec_mix_rev_l4 \
        v16, v17, v25, v27, \
        v28, v29, v18, v19, v30, v31, v0, v8, v9, v10, v11, \
        table, \
        q4, q5, q6, q7, \
        #4*16, #5*16, #6*16, #7*16

do_butterfly_vec_mix_rev_l3 \
        v18, v19, v30, v31, \
        v24, v25, v16, v17, v26, v27, v0, v6, v7, v6, v7, \
        table, \
        q1, q2, q3, \
        #1*16, #2*16, #3*16

do_butterfly_vec_mix_rev v24, v25, v16, v17, v26, v27, v24, v25, v18, v19, v28, v29, v0, v4, v5, v4, v5, v2, v3, v2, v3
do_butterfly_vec_mix_rev v24, v25, v18, v19, v28, v29, v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3, v2, v3, v2, v3
do_butterfly_vec_top v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3

oo_barrett v24, v25, v26, v27, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, #11, v0

add table, table, #256

trn_4x4 v28, v29, v30, v31, v16, v17, v18, v19

trn_4x4_2s4 v24, v25, v26, v27, v16, v17, v18, v19, src0, src1, q28, q29, q30, q31, #1*16, #1*16, #3*16, #3*16

sub counter, counter, #1
cbnz counter, _intt_bot_loop

str q24, [src0, #0*16]
str q25, [src1, #0*16]
str q26, [src0, #2*16]
str q27, [src1, #2*16]

.unreq Q
.unreq BarrettM
.unreq src0
.unreq src1
.unreq table
.unreq counter
pop_all

ret

.align 2
.global PQCLEAN_MLKEM512_AARCH64__asm_intt_SIMD_top
.global _PQCLEAN_MLKEM512_AARCH64__asm_intt_SIMD_top
PQCLEAN_MLKEM512_AARCH64__asm_intt_SIMD_top:
_PQCLEAN_MLKEM512_AARCH64__asm_intt_SIMD_top:

push_all

Q       .req w20
BarrettM .req w21
invN    .req w22
invN_f  .req w23
src     .req x0
table   .req x1
counter .req x19

ldrsh Q, [x2, #0]
ldrsh BarrettM, [x2, #8]
ldr invN, [x2, #10]
ldr invN_f, [x2, #14]

mov v4.S[0], invN
mov v4.S[1], invN_f

ldr q0, [table, #0*16]
mov v0.H[0], Q
ldr q1, [table, #1*16]
ldr q2, [table, #2*16]
ldr q3, [table, #3*16]

ldr q16, [src, # 8*32]
ldr q17, [src, # 9*32]
ldr q18, [src, #10*32]
ldr q19, [src, #11*32]
ldr q20, [src, #12*32]
ldr q21, [src, #13*32]
ldr q22, [src, #14*32]
ldr q23, [src, #15*32]

qo_butterfly_botll \
        v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, \
        src, \
        q8, q9, q10, q11, \
        #0*32, #1*32, #2*32, #3*32, \
        src, \
        q12, q13, q14, q15, \
        #4*32, #5*32, #6*32, #7*32

qo_butterfly_mix_rev v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v0, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7
qo_butterfly_mix_rev v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v0, v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7
qo_butterfly_mix_rev v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v0, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3
qo_butterfly_mix_rev v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7
qo_butterfly_mix_rev v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, v14, v15, v0, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5
qo_butterfly_mix_rev v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, v14, v15, v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v0, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3
qo_butterfly_mix_rev v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v12, v13, v14, v15, v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3

qo_butterfly_topsl \
        v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, \
        src, \
        q16, q17, q18, q19, \
        #8*32, #9*32, #10*32, #11*32, \
        src, \
        q16, q17, q18, q19, \
        #(16+8*32), #(16+9*32), #(16+10*32), #(16+11*32)

qo_montgomery_mul_insl \
        v8, v9, v10, v11, v28, v29, v30, v31, v0, v4, 1, 0, v4, 1, 0, v4, 1, 0, v4, 1, 0, \
        src, \
        q20, q21, q22, q23, \
        #12*32, #13*32, #14*32, #15*32, \
        src, \
        q20, q21, q22, q23, \
        #(16+12*32), #(16+13*32), #(16+14*32), #(16+15*32)

qo_butterfly_botsl_mul \
        v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, \
        src, \
        q8, q9, q10, q11, \
        #0*32, #1*32, #2*32, #3*32, \
        src, \
        q8, q9, q10, q11, \
        #(16+0*32), #(16+1*32), #(16+2*32), #(16+3*32), \
        v12, v13, v14, v15, v24, v25, v26, v27, \
        v0, v4, 1, 0, v4, 1, 0, v4, 1, 0, v4, 1, 0

str q12, [src, # 4*32]
ldr q12, [src, #(16+ 4*32)]
str q13, [src, # 5*32]
ldr q13, [src, #(16+ 5*32)]
str q14, [src, # 6*32]
ldr q14, [src, #(16+ 6*32)]
str q15, [src, # 7*32]
ldr q15, [src, #(16+ 7*32)]

qo_butterfly_mix_rev v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v0, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7
qo_butterfly_mix_rev v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v0, v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7
qo_butterfly_mix_rev v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v0, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3
qo_butterfly_mix_rev v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7
qo_butterfly_mix_rev v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, v14, v15, v0, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5
qo_butterfly_mix_rev v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, v14, v15, v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v0, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3
qo_butterfly_mix_rev v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v12, v13, v14, v15, v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3

qo_butterfly_tops \
        v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, \
        src, \
        q16, q17, q18, q19, \
        #(16+8*32), #(16+9*32), #(16+10*32), #(16+11*32)

qo_montgomery_mul_ins \
        v8, v9, v10, v11, v28, v29, v30, v31, v0, v4, 1, 0, v4, 1, 0, v4, 1, 0, v4, 1, 0, \
        src, \
        q20, q21, q22, q23, \
        #(16+12*32), #(16+13*32), #(16+14*32), #(16+15*32)

qo_montgomery_mul_ins \
        v12, v13, v14, v15, v24, v25, v26, v27, v0, v4, 1, 0, v4, 1, 0, v4, 1, 0, v4, 1, 0, \
        src, \
        q8, q9, q10, q11, \
        #(16+0*32), #(16+1*32), #(16+2*32), #(16+3*32)

str q12, [src, #(16+ 4*32)]
str q13, [src, #(16+ 5*32)]
str q14, [src, #(16+ 6*32)]
str q15, [src, #(16+ 7*32)]

.unreq Q
.unreq BarrettM
.unreq invN
.unreq invN_f
.unreq src
.unreq counter
pop_all

ret
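The NEON kernels above lean on one recurring instruction pair: sqrdmulh against a precomputed "high" constant followed by mul/mls against the "low" constant and q, which realizes a multiplication by a fixed value modulo q (the Barrett variant, oo_barrett, instead rounds the quotient with a shift, #11 above). A scalar model of the pair, with saturation ignored and constant names assumed for illustration:

    #include <stdint.h>
    /* Multiply a by a constant mod q, given the precomputed pair
       (c_lo, c_hi): c_hi feeds sqrdmulh, which computes
       (2*a*c_hi + 2^15) >> 16; mls then subtracts t*q. */
    static int16_t mul_const_model(int16_t a, int16_t c_lo, int16_t c_hi,
                                   int16_t q) {
        int16_t t = (int16_t)(((int32_t)a * c_hi * 2 + (1 << 15)) >> 16);
        return (int16_t)((int16_t)(a * c_lo) - (int16_t)(t * q));
    }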
mktmansour/MKT-KSA-Geolocation-Security
23,888
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-512/aarch64/__asm_base_mul.S
/* * We offer * CC0 1.0 Universal or the following MIT License for this file. * You may freely choose one of them that applies. * * MIT License * * Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang * Copyright (c) 2023: Vincent Hwang * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "macros.inc" #include "params.h" .align 2 .global PQCLEAN_MLKEM512_AARCH64__asm_point_mul_extended .global _PQCLEAN_MLKEM512_AARCH64__asm_point_mul_extended PQCLEAN_MLKEM512_AARCH64__asm_point_mul_extended: _PQCLEAN_MLKEM512_AARCH64__asm_point_mul_extended: push_all Q .req w20 des .req x0 src1 .req x1 src2ex .req x2 counter .req x19 ldrsh Q, [x3] dup v28.8H, Q ldr q0, [src1, #0*16] ldr q1, [src1, #1*16] ldr q2, [src1, #2*16] ldr q3, [src1, #3*16] ldr q4, [src1, #4*16] ldr q5, [src1, #5*16] ldr q6, [src1, #6*16] ldr q7, [src1, #7*16] add src1, src1, #8*16 uzp2 v1.8H, v0.8H, v1.8H uzp2 v3.8H, v2.8H, v3.8H uzp2 v5.8H, v4.8H, v5.8H uzp2 v7.8H, v6.8H, v7.8H ldr q8, [src2ex, #0*16] ldr q10, [src2ex, #2*16] ldr q12, [src2ex, #4*16] ldr q14, [src2ex, #6*16] ldr q9, [src2ex, #1*16] ldr q11, [src2ex, #3*16] ldr q13, [src2ex, #5*16] ldr q15, [src2ex, #7*16] add src2ex, src2ex, #8*16 ldr q16, [src1, #0*16] sqrdmulh v0.8H, v1.8H, v8.8H ldr q17, [src1, #1*16] sqrdmulh v2.8H, v3.8H, v10.8H ldr q18, [src1, #2*16] sqrdmulh v4.8H, v5.8H, v12.8H ldr q19, [src1, #3*16] sqrdmulh v6.8H, v7.8H, v14.8H ldr q20, [src1, #4*16] mul v1.8H, v1.8H, v9.8H uzp2 v17.8H, v16.8H, v17.8H ldr q21, [src1, #5*16] mul v3.8H, v3.8H, v11.8H uzp2 v19.8H, v18.8H, v19.8H ldr q22, [src1, #6*16] mul v5.8H, v5.8H, v13.8H uzp2 v21.8H, v20.8H, v21.8H ldr q23, [src1, #7*16] mul v7.8H, v7.8H, v15.8H uzp2 v23.8H, v22.8H, v23.8H add src1, src1, #8*16 ldr q8, [src2ex, #0*16] mls v1.8H, v0.8H, v28.8H ldr q10, [src2ex, #2*16] mls v3.8H, v2.8H, v28.8H ldr q12, [src2ex, #4*16] mls v5.8H, v4.8H, v28.8H ldr q14, [src2ex, #6*16] mls v7.8H, v6.8H, v28.8H ldr q9, [src2ex, #1*16] str q1, [des, #0*16] ldr q11, [src2ex, #3*16] str q3, [des, #1*16] ldr q13, [src2ex, #5*16] str q5, [des, #2*16] ldr q15, [src2ex, #7*16] str q7, [des, #3*16] add des, des, #4*16 add src2ex, src2ex, #8*16 ldr q0, [src1, #0*16] sqrdmulh v16.8H, v17.8H, v8.8H ldr q1, [src1, #1*16] sqrdmulh v18.8H, v19.8H, v10.8H ldr q2, [src1, #2*16] sqrdmulh v20.8H, v21.8H, v12.8H ldr q3, [src1, #3*16] sqrdmulh v22.8H, v23.8H, v14.8H ldr q4, [src1, #4*16] mul v17.8H, v17.8H, v9.8H uzp2 v1.8H, v0.8H, v1.8H ldr q5, [src1, #5*16] mul v19.8H, v19.8H, v11.8H uzp2 
v3.8H, v2.8H, v3.8H ldr q6, [src1, #6*16] mul v21.8H, v21.8H, v13.8H uzp2 v5.8H, v4.8H, v5.8H ldr q7, [src1, #7*16] mul v23.8H, v23.8H, v15.8H uzp2 v7.8H, v6.8H, v7.8H add src1, src1, #8*16 ldr q8, [src2ex, #0*16] mls v17.8H, v16.8H, v28.8H ldr q10, [src2ex, #2*16] mls v19.8H, v18.8H, v28.8H ldr q12, [src2ex, #4*16] mls v21.8H, v20.8H, v28.8H ldr q14, [src2ex, #6*16] mls v23.8H, v22.8H, v28.8H ldr q9, [src2ex, #1*16] str q17, [des, #0*16] ldr q11, [src2ex, #3*16] str q19, [des, #1*16] ldr q13, [src2ex, #5*16] str q21, [des, #2*16] ldr q15, [src2ex, #7*16] str q23, [des, #3*16] add des, des, #4*16 add src2ex, src2ex, #8*16 ldr q16, [src1, #0*16] sqrdmulh v0.8H, v1.8H, v8.8H ldr q17, [src1, #1*16] sqrdmulh v2.8H, v3.8H, v10.8H ldr q18, [src1, #2*16] sqrdmulh v4.8H, v5.8H, v12.8H ldr q19, [src1, #3*16] sqrdmulh v6.8H, v7.8H, v14.8H ldr q20, [src1, #4*16] mul v1.8H, v1.8H, v9.8H uzp2 v17.8H, v16.8H, v17.8H ldr q21, [src1, #5*16] mul v3.8H, v3.8H, v11.8H uzp2 v19.8H, v18.8H, v19.8H ldr q22, [src1, #6*16] mul v5.8H, v5.8H, v13.8H uzp2 v21.8H, v20.8H, v21.8H ldr q23, [src1, #7*16] mul v7.8H, v7.8H, v15.8H uzp2 v23.8H, v22.8H, v23.8H add src1, src1, #8*16 ldr q8, [src2ex, #0*16] mls v1.8H, v0.8H, v28.8H ldr q10, [src2ex, #2*16] mls v3.8H, v2.8H, v28.8H ldr q12, [src2ex, #4*16] mls v5.8H, v4.8H, v28.8H ldr q14, [src2ex, #6*16] mls v7.8H, v6.8H, v28.8H ldr q9, [src2ex, #1*16] str q1, [des, #0*16] ldr q11, [src2ex, #3*16] str q3, [des, #1*16] ldr q13, [src2ex, #5*16] str q5, [des, #2*16] ldr q15, [src2ex, #7*16] str q7, [des, #3*16] add des, des, #4*16 add src2ex, src2ex, #8*16 sqrdmulh v16.8H, v17.8H, v8.8H sqrdmulh v18.8H, v19.8H, v10.8H sqrdmulh v20.8H, v21.8H, v12.8H sqrdmulh v22.8H, v23.8H, v14.8H mul v17.8H, v17.8H, v9.8H mul v19.8H, v19.8H, v11.8H mul v21.8H, v21.8H, v13.8H mul v23.8H, v23.8H, v15.8H mls v17.8H, v16.8H, v28.8H mls v19.8H, v18.8H, v28.8H mls v21.8H, v20.8H, v28.8H mls v23.8H, v22.8H, v28.8H str q17, [des, #0*16] str q19, [des, #1*16] str q21, [des, #2*16] str q23, [des, #3*16] add des, des, #4*16 .unreq Q .unreq des .unreq src1 .unreq src2ex .unreq counter pop_all ret .align 2 .global PQCLEAN_MLKEM512_AARCH64__asm_asymmetric_mul .global _PQCLEAN_MLKEM512_AARCH64__asm_asymmetric_mul PQCLEAN_MLKEM512_AARCH64__asm_asymmetric_mul: _PQCLEAN_MLKEM512_AARCH64__asm_asymmetric_mul: push_all des .req x11 src1_0 .req x0 src2_0 .req x1 src2asy_0 .req x2 src1_1 .req x4 src2_1 .req x5 src2asy_1 .req x6 src1_2 .req x8 src2_2 .req x9 src2asy_2 .req x10 src1_3 .req x12 src2_3 .req x13 src2asy_3 .req x14 counter .req x19 ldr s4, [x3] add des, x4, #0 add src1_1, src1_0, #512*1 add src2_1, src2_0, #512*1 add src2asy_1, src2asy_0, #256*1 #if KYBER_K > 2 add src1_2, src1_0, #512*2 add src2_2, src2_0, #512*2 add src2asy_2, src2asy_0, #256*2 #endif #if KYBER_K > 3 add src1_3, src1_0, #512*3 add src2_3, src2_0, #512*3 add src2asy_3, src2asy_0, #256*3 #endif ldr q20, [src1_0, #0*16] ldr q21, [src1_0, #1*16] ldr q22, [src2_0, #0*16] ldr q23, [src2_0, #1*16] add src1_0, src1_0, #32 add src2_0, src2_0, #32 uzp1 v0.8H, v20.8H, v21.8H uzp2 v1.8H, v20.8H, v21.8H uzp1 v2.8H, v22.8H, v23.8H uzp2 v3.8H, v22.8H, v23.8H ld1 {v28.8H}, [src2asy_0], #16 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, v21.8H 
smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H ld1 {v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, v22.8H, v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif // TODO:interleaving mov counter, #15 _asymmetric_mul_loop: ldr q20, [src1_0, #0*16] uzp1 v6.8H, v16.8H, v18.8H ldr q21, [src1_0, #1*16] uzp1 v7.8H, v17.8H, v19.8H ldr q22, [src2_0, #0*16] mul v6.8H, v6.8H, v4.H[1] ldr q23, [src2_0, #1*16] mul v7.8H, v7.8H, v4.H[1] add src1_0, src1_0, #32 add src2_0, src2_0, #32 smlal v16.4S, v6.4H, v4.H[0] uzp1 v0.8H, v20.8H, v21.8H smlal2 v18.4S, v6.8H, v4.H[0] uzp2 v1.8H, v20.8H, v21.8H smlal v17.4S, v7.4H, v4.H[0] uzp1 v2.8H, v22.8H, v23.8H smlal2 v19.4S, v7.8H, v4.H[0] uzp2 v3.8H, v22.8H, v23.8H ld1 {v28.8H}, [src2asy_0], #16 uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H st2 { v6.8H, v7.8H}, [des], #32 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, v21.8H smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H ld1 {v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, v22.8H, 
v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif sub counter, counter, #1 cbnz counter, _asymmetric_mul_loop uzp1 v6.8H, v16.8H, v18.8H uzp1 v7.8H, v17.8H, v19.8H mul v6.8H, v6.8H, v4.H[1] mul v7.8H, v7.8H, v4.H[1] smlal v16.4S, v6.4H, v4.H[0] smlal2 v18.4S, v6.8H, v4.H[0] smlal v17.4S, v7.4H, v4.H[0] smlal2 v19.4S, v7.8H, v4.H[0] uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H st2 { v6.8H, v7.8H}, [des], #32 .unreq des .unreq src1_0 .unreq src2_0 .unreq src2asy_0 .unreq src1_1 .unreq src2_1 .unreq src2asy_1 .unreq src1_2 .unreq src2_2 .unreq src2asy_2 .unreq src1_3 .unreq src2_3 .unreq src2asy_3 .unreq counter pop_all ret .align 2 .global PQCLEAN_MLKEM512_AARCH64__asm_asymmetric_mul_montgomery .global _PQCLEAN_MLKEM512_AARCH64__asm_asymmetric_mul_montgomery PQCLEAN_MLKEM512_AARCH64__asm_asymmetric_mul_montgomery: _PQCLEAN_MLKEM512_AARCH64__asm_asymmetric_mul_montgomery: push_all des .req x11 src1_0 .req x0 src2_0 .req x1 src2asy_0 .req x2 src1_1 .req x4 src2_1 .req x5 src2asy_1 .req x6 src1_2 .req x8 src2_2 .req x9 src2asy_2 .req x10 src1_3 .req x12 src2_3 .req x13 src2asy_3 .req x14 counter .req x19 ldr q4, [x3] add des, x4, #0 add src1_1, src1_0, #512*1 add src2_1, src2_0, #512*1 add src2asy_1, src2asy_0, #256*1 #if KYBER_K > 2 add src1_2, src1_0, #512*2 add src2_2, src2_0, #512*2 add src2asy_2, src2asy_0, #256*2 #endif #if KYBER_K > 3 add src1_3, src1_0, #512*3 add src2_3, src2_0, #512*3 add src2asy_3, src2asy_0, #256*3 #endif ldr q20, [src1_0, #0*16] ldr q21, [src1_0, #1*16] ldr q22, [src2_0, #0*16] ldr q23, [src2_0, #1*16] add src1_0, src1_0, #32 add src2_0, src2_0, #32 uzp1 v0.8H, v20.8H, v21.8H uzp2 v1.8H, v20.8H, v21.8H uzp1 v2.8H, v22.8H, v23.8H uzp2 v3.8H, v22.8H, v23.8H ld1 {v28.8H}, [src2asy_0], #16 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, v21.8H smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H ld1 
{v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, v22.8H, v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif mov counter, #15 _asymmetric_mul_montgomery_loop: uzp1 v6.8H, v16.8H, v18.8H uzp1 v7.8H, v17.8H, v19.8H mul v6.8H, v6.8H, v4.H[1] mul v7.8H, v7.8H, v4.H[1] ldr q20, [src1_0, #0*16] smlal v16.4S, v6.4H, v4.H[0] ldr q21, [src1_0, #1*16] smlal2 v18.4S, v6.8H, v4.H[0] ldr q22, [src2_0, #0*16] smlal v17.4S, v7.4H, v4.H[0] ldr q23, [src2_0, #1*16] smlal2 v19.4S, v7.8H, v4.H[0] add src1_0, src1_0, #32 add src2_0, src2_0, #32 uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H uzp1 v0.8H, v20.8H, v21.8H sqrdmulh v16.8H, v6.8H, v4.H[4] uzp2 v1.8H, v20.8H, v21.8H sqrdmulh v17.8H, v7.8H, v4.H[4] uzp1 v2.8H, v22.8H, v23.8H mul v6.8H, v6.8H, v4.H[5] uzp2 v3.8H, v22.8H, v23.8H mul v7.8H, v7.8H, v4.H[5] mls v6.8H, v16.8H, v4.H[0] mls v7.8H, v17.8H, v4.H[0] st2 { v6.8H, v7.8H}, [des], #32 ld1 {v28.8H}, [src2asy_0], #16 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, v21.8H smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H ld1 {v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H 
smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, v22.8H, v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif sub counter, counter, #1 cbnz counter, _asymmetric_mul_montgomery_loop uzp1 v6.8H, v16.8H, v18.8H uzp1 v7.8H, v17.8H, v19.8H mul v6.8H, v6.8H, v4.H[1] mul v7.8H, v7.8H, v4.H[1] smlal v16.4S, v6.4H, v4.H[0] smlal2 v18.4S, v6.8H, v4.H[0] smlal v17.4S, v7.4H, v4.H[0] smlal2 v19.4S, v7.8H, v4.H[0] uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H sqrdmulh v16.8H, v6.8H, v4.H[4] sqrdmulh v17.8H, v7.8H, v4.H[4] mul v6.8H, v6.8H, v4.H[5] mul v7.8H, v7.8H, v4.H[5] mls v6.8H, v16.8H, v4.H[0] mls v7.8H, v17.8H, v4.H[0] st2 { v6.8H, v7.8H}, [des], #32 .unreq des .unreq src1_0 .unreq src2_0 .unreq src2asy_0 .unreq src1_1 .unreq src2_1 .unreq src2asy_1 .unreq src1_2 .unreq src2_2 .unreq src2asy_2 .unreq src1_3 .unreq src2_3 .unreq src2asy_3 .unreq counter pop_all ret
mktmansour/MKT-KSA-Geolocation-Security
12,798
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-512/aarch64/__asm_NTT.S
/*
 * We offer
 * CC0 1.0 Universal or the following MIT License for this file.
 * You may freely choose one of them that applies.
 *
 * MIT License
 *
 * Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
 * Copyright (c) 2023: Vincent Hwang
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "macros.inc"

.align 2
.global PQCLEAN_MLKEM512_AARCH64__asm_ntt_SIMD_top
.global _PQCLEAN_MLKEM512_AARCH64__asm_ntt_SIMD_top
PQCLEAN_MLKEM512_AARCH64__asm_ntt_SIMD_top:
_PQCLEAN_MLKEM512_AARCH64__asm_ntt_SIMD_top:

    push_simd
    Q .req w8
    src .req x0
    table .req x1
    counter .req x11

    ldrsh Q, [x2, #0]

    ldr q0, [table, # 0*16]
    ldr q1, [table, # 1*16]
    ldr q2, [table, # 2*16]
    ldr q3, [table, # 3*16]

    mov v0.H[0], Q

    ldr q13, [src, # 9*32]
    ldr q15, [src, #11*32]
    ldr q17, [src, #13*32]
    ldr q19, [src, #15*32]

    qo_butterfly_topl \
            v13, v15, v17, v19, v28, v29, v30, v31, \
            v0, \
            v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \
            src, \
            q5, q7, q9, q11, \
            #1*32, #3*32, #5*32, #7*32

    qo_butterfly_mixll \
            v5, v7, v9, v11, v13, v15, v17, v19, v28, v29, v30, v31, \
            v12, v14, v16, v18, v20, v21, v22, v23, \
            v0, \
            v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \
            src, \
            q12, q14, q16, q18, \
            #8*32, #10*32, #12*32, #14*32, \
            src, \
            q4, q6, q8, q10, \
            #0*32, #2*32, #4*32, #6*32

    qo_butterfly_mix \
            v4, v6, v8, v10, v12, v14, v16, v18, v20, v21, v22, v23, \
            v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \
            v0, \
            v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \
            v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7

    qo_butterfly_mix \
            v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \
            v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \
            v0, \
            v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \
            v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7

    qo_butterfly_mix \
            v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \
            v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \
            v0, \
            v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \
            v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7

    qo_butterfly_mix \
            v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \
            v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \
            v0, \
            v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \
            v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7

    qo_butterfly_mix \
            v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \
            v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \
            v0, \
            v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \
            v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7

    qo_butterfly_mixsls \
            v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \
            v13, v15, v17, v19, v20, v21, v22, v23, \
            v0, \
            v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7, \
            src, \
            q5, q7, q9, q11, \
            #1*32, #3*32, #5*32, #7*32, \
            src, \
            q5, q7, q9, q11, \
            #(16+1*32), #(16+3*32), #(16+5*32), #(16+7*32), \
            src, \
            q4, q6, q8, q10, \
            #0*32, #2*32, #4*32, #6*32

    qo_butterfly_botsls \
            v12, v14, v16, v18, v13, v15, v17, v19, v20, v21, v22, v23, \
            src, \
            q13, q15, q17, q19, \
            #9*32, #11*32, #13*32, #15*32, \
            src, \
            q13, q15, q17, q19, \
            #(16+9*32), #(16+11*32), #(16+13*32), #(16+15*32), \
            src, \
            q12, q14, q16, q18, \
            #8*32, #10*32, #12*32, #14*32

    qo_butterfly_topl \
            v13, v15, v17, v19, v28, v29, v30, v31, \
            v0, \
            v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \
            src, \
            q12, q14, q16, q18, \
            #(16+8*32), #(16+10*32), #(16+12*32), #(16+14*32)

    qo_butterfly_mixl \
            v5, v7, v9, v11, v13, v15, v17, v19, v28, v29, v30, v31, \
            v12, v14, v16, v18, v20, v21, v22, v23, \
            v0, \
            v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \
            src, \
            q4, q6, q8, q10, \
            #(16+0*32), #(16+2*32), #(16+4*32), #(16+6*32)

    qo_butterfly_mix \
            v4, v6, v8, v10, v12, v14, v16, v18, v20, v21, v22, v23, \
            v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \
            v0, \
            v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \
            v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7

    qo_butterfly_mix \
            v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \
            v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \
            v0, \
            v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \
            v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7

    qo_butterfly_mix \
            v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \
            v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \
            v0, \
            v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \
            v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7

    qo_butterfly_mix \
            v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \
            v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \
            v0, \
            v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \
            v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7

    qo_butterfly_mix \
            v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \
            v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \
            v0, \
            v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \
            v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7

    qo_butterfly_mixss \
            v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \
            v13, v15, v17, v19, v20, v21, v22, v23, \
            v0, \
            v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7, \
            src, \
            q5, q7, q9, q11, \
            #(16+1*32), #(16+3*32), #(16+5*32), #(16+7*32), \
            src, \
            q4, q6, q8, q10, \
            #(16+0*32), #(16+2*32), #(16+4*32), #(16+6*32)

    qo_butterfly_botss \
            v12, v14, v16, v18, v13, v15, v17, v19, v20, v21, v22, v23, \
            src, \
            q13, q15, q17, q19, \
            #(16+9*32), #(16+11*32), #(16+13*32), #(16+15*32), \
            src, \
            q12, q14, q16, q18, \
            #(16+8*32), #(16+10*32), #(16+12*32), #(16+14*32)

    .unreq Q
    .unreq src
    .unreq table
    .unreq counter
    pop_simd

    ret

.align 2
.global PQCLEAN_MLKEM512_AARCH64__asm_ntt_SIMD_bot
.global _PQCLEAN_MLKEM512_AARCH64__asm_ntt_SIMD_bot
PQCLEAN_MLKEM512_AARCH64__asm_ntt_SIMD_bot:
_PQCLEAN_MLKEM512_AARCH64__asm_ntt_SIMD_bot:

    push_simd
    Q .req w8
    BarrettM .req w9
    src0 .req x0
    src1 .req x1
    table .req x10
    counter .req x11

    ldrsh Q, [x2, #0]
    ldrsh BarrettM, [x2, #8]

    add table, x1, #64

    add src0, x0, #256*0
    add src1, x0, #256*1

    mov v0.H[0], Q
    mov v0.H[1], BarrettM

    ldr q28, [src0, # 1*16]
    ldr q29, [src1, # 1*16]
    ldr q30, [src0, # 3*16]
    ldr q31, [src1, # 3*16]

    trn_4x4_l3 v28, v29, v30, v31, v20, v21, v22, v23, table, q1, q2, q3, #1*16, #2*16, #3*16

    do_butterfly_vec_top_2ltrn_4x4 \
            v29, v31, v18, v19, v0, v2, v3, v2, v3, \
            src0, src1, \
            q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16, \
            v24, v25, v26, v27, v20, v21, v22, v23

    do_butterfly_vec_mixl \
            v25, v27, v29, v31, v18, v19, \
            v28, v30, v16, v17, \
            v0, \
            v2, v3, v2, v3, \
            table, \
            q4, q5, q6, q7, #4*16, #5*16, #6*16, #7*16

    do_butterfly_vec_mixl \
            v24, v26, v28, v30, v16, v17, \
            v27, v31, v18, v19, \
            v0, \
            v4, v5, v6, v7, \
            table, \
            q8, q9, q10, q11, #8*16, #9*16, #10*16, #11*16

    do_butterfly_vec_mixl \
            v25, v29, v27, v31, v18, v19, \
            v26, v30, v16, v17, \
            v0, \
            v4, v5, v6, v7, \
            table, \
            q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16

    add table, table, #256

    do_butterfly_vec_mix v24, v28, v26, v30, v16, v17, v24, v26, v25, v27, v18, v19, v0, v4, v5, v6, v7, v8, v9, v10, v11

    do_butterfly_vec_mix v24, v26, v25, v27, v18, v19, v28, v30, v29, v31, v16, v17, v0, v8, v9, v10, v11, v12, v13, v14, v15

    do_butterfly_vec_bot_oo_barrett_trn_4x4 \
            v28, v30, v29, v31, v16, v17, \
            v24, v25, v26, v27, v20, v21, v22, v23, v28, v29, v30, v31, v16, v17, v18, v19, v0, #11, v0

    trn_4x4_2s4 v28, v29, v30, v31, v16, v17, v18, v19, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16

    mov counter, #3
    _ntt_bot_loop:

    str q28, [src0, # 1*16]
    ldr q28, [src0, #(64+1*16)]
    str q29, [src1, # 1*16]
    ldr q29, [src1, #(64+1*16)]
    str q30, [src0, # 3*16]
    ldr q30, [src0, #(64+3*16)]
    str q31, [src1, # 3*16]
    ldr q31, [src1, #(64+3*16)]

    add src0, src0, #64
    add src1, src1, #64

    trn_4x4_l3 v28, v29, v30, v31, v20, v21, v22, v23, table, q1, q2, q3, #1*16, #2*16, #3*16

    do_butterfly_vec_top_2ltrn_4x4 \
            v29, v31, v18, v19, v0, v2, v3, v2, v3, \
            src0, src1, \
            q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16, \
            v24, v25, v26, v27, v20, v21, v22, v23

    do_butterfly_vec_mixl \
            v25, v27, v29, v31, v18, v19, \
            v28, v30, v16, v17, \
            v0, \
            v2, v3, v2, v3, \
            table, \
            q4, q5, q6, q7, #4*16, #5*16, #6*16, #7*16

    do_butterfly_vec_mixl \
            v24, v26, v28, v30, v16, v17, \
            v27, v31, v18, v19, \
            v0, \
            v4, v5, v6, v7, \
            table, \
            q8, q9, q10, q11, #8*16, #9*16, #10*16, #11*16

    do_butterfly_vec_mixl \
            v25, v29, v27, v31, v18, v19, \
            v26, v30, v16, v17, \
            v0, \
            v4, v5, v6, v7, \
            table, \
            q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16

    add table, table, #256

    do_butterfly_vec_mix v24, v28, v26, v30, v16, v17, v24, v26, v25, v27, v18, v19, v0, v4, v5, v6, v7, v8, v9, v10, v11

    do_butterfly_vec_mix v24, v26, v25, v27, v18, v19, v28, v30, v29, v31, v16, v17, v0, v8, v9, v10, v11, v12, v13, v14, v15

    do_butterfly_vec_bot_oo_barrett_trn_4x4 \
            v28, v30, v29, v31, v16, v17, \
            v24, v25, v26, v27, v20, v21, v22, v23, v28, v29, v30, v31, v16, v17, v18, v19, v0, #11, v0

    trn_4x4_2s4 v28, v29, v30, v31, v16, v17, v18, v19, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16

    sub counter, counter, #1
    cbnz counter, _ntt_bot_loop

    str q28, [src0, # 1*16]
    str q29, [src1, # 1*16]
    str q30, [src0, # 3*16]
    str q31, [src1, # 3*16]

    add src0, src0, #64
    add src1, src1, #64

    .unreq Q
    .unreq BarrettM
    .unreq src0
    .unreq src1
    .unreq table
    .unreq counter
    pop_simd

    ret
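
// Summary (a reading of the code above, not authoritative):
// _asm_ntt_SIMD_top runs the first butterfly layers of the NTT over all 256
// 16-bit coefficients at once, with the modulus Q taken from the constants
// pointer in x2 and the twiddle factors streamed from the table in x1;
// _asm_ntt_SIMD_bot finishes the remaining layers in 64-coefficient blocks
// (one block in the prologue plus three loop iterations) and normalizes the
// results with a Barrett reduction driven by the BarrettM multiplier loaded
// from [x2, #8] and the shift #11 passed to the barrett macros.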
mktmansour/MKT-KSA-Geolocation-Security
7,463
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-512/aarch64/__asm_poly.S
/*
 * We offer
 * CC0 1.0 Universal or the following MIT License for this file.
 * You may freely choose one of them that applies.
 *
 * MIT License
 *
 * Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
 * Copyright (c) 2023: Vincent Hwang
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "macros.inc"

.align 2
.global PQCLEAN_MLKEM512_AARCH64__asm_add_reduce
.global _PQCLEAN_MLKEM512_AARCH64__asm_add_reduce
PQCLEAN_MLKEM512_AARCH64__asm_add_reduce:
_PQCLEAN_MLKEM512_AARCH64__asm_add_reduce:

    mov w4, #3329
    mov w5, #25519

    add x2, x0, #0

    dup v0.8H, w4
    dup v1.8H, w5

    ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64
    ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64
    ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64
    ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64

    add v4.8H, v16.8H, v24.8H
    add v5.8H, v17.8H, v25.8H
    add v6.8H, v18.8H, v26.8H
    add v7.8H, v19.8H, v27.8H

    add v16.8H, v20.8H, v28.8H
    add v17.8H, v21.8H, v29.8H
    add v18.8H, v22.8H, v30.8H
    add v19.8H, v23.8H, v31.8H

    oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0

    mov x15, #3
    _add_reduce_loop:

    st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
    ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64
    st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64
    ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64

    ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64
    ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64

    add v4.8H, v16.8H, v24.8H
    add v5.8H, v17.8H, v25.8H
    add v6.8H, v18.8H, v26.8H
    add v7.8H, v19.8H, v27.8H

    add v16.8H, v20.8H, v28.8H
    add v17.8H, v21.8H, v29.8H
    add v18.8H, v22.8H, v30.8H
    add v19.8H, v23.8H, v31.8H

    oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0

    sub x15, x15, #1
    cbnz x15, _add_reduce_loop

    st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
    st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64

    ret

.align 2
.global PQCLEAN_MLKEM512_AARCH64__asm_sub_reduce
.global _PQCLEAN_MLKEM512_AARCH64__asm_sub_reduce
PQCLEAN_MLKEM512_AARCH64__asm_sub_reduce:
_PQCLEAN_MLKEM512_AARCH64__asm_sub_reduce:

    mov w4, #3329
    mov w5, #25519

    add x2, x0, #0

    dup v0.8H, w4
    dup v1.8H, w5

    ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64
    ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64
    ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64
    ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64

    sub v4.8H, v16.8H, v24.8H
    sub v5.8H, v17.8H, v25.8H
    sub v6.8H, v18.8H, v26.8H
    sub v7.8H, v19.8H, v27.8H

    sub v16.8H, v20.8H, v28.8H
    sub v17.8H, v21.8H, v29.8H
    sub v18.8H, v22.8H, v30.8H
    sub v19.8H, v23.8H, v31.8H

    oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0

    mov x15, #3
    _sub_reduce_loop:

    st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
    ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64
    st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64
    ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64

    ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64
    ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64

    sub v4.8H, v16.8H, v24.8H
    sub v5.8H, v17.8H, v25.8H
    sub v6.8H, v18.8H, v26.8H
    sub v7.8H, v19.8H, v27.8H

    sub v16.8H, v20.8H, v28.8H
    sub v17.8H, v21.8H, v29.8H
    sub v18.8H, v22.8H, v30.8H
    sub v19.8H, v23.8H, v31.8H

    oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0

    sub x15, x15, #1
    cbnz x15, _sub_reduce_loop

    st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
    st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64

    ret

.align 2
.global PQCLEAN_MLKEM512_AARCH64__asm_add_add_reduce
.global _PQCLEAN_MLKEM512_AARCH64__asm_add_add_reduce
PQCLEAN_MLKEM512_AARCH64__asm_add_add_reduce:
_PQCLEAN_MLKEM512_AARCH64__asm_add_add_reduce:

    mov w4, #3329
    mov w5, #25519

    add x3, x0, #0

    dup v0.8H, w4
    dup v1.8H, w5

    ld1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x3], #64
    ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x3], #64
    ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x1], #64
    ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64

    add v4.8H, v4.8H, v16.8H
    add v5.8H, v5.8H, v17.8H
    ld1 {v16.8H, v17.8H}, [x2], #32
    add v6.8H, v6.8H, v18.8H
    add v7.8H, v7.8H, v19.8H
    ld1 {v18.8H, v19.8H}, [x2], #32

    add v20.8H, v20.8H, v24.8H
    add v21.8H, v21.8H, v25.8H
    ld1 {v24.8H, v25.8H}, [x2], #32
    add v22.8H, v22.8H, v26.8H
    add v23.8H, v23.8H, v27.8H
    ld1 {v26.8H, v27.8H}, [x2], #32

    add v4.8H, v4.8H, v16.8H
    add v5.8H, v5.8H, v17.8H
    add v6.8H, v6.8H, v18.8H
    add v7.8H, v7.8H, v19.8H

    add v20.8H, v20.8H, v24.8H
    add v21.8H, v21.8H, v25.8H
    add v22.8H, v22.8H, v26.8H
    add v23.8H, v23.8H, v27.8H

    oo_barrett v4, v5, v6, v7, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v1, #11, v0

    mov x15, #3
    _add_add_reduce_loop:

    st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
    ld1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x3], #64
    st1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x0], #64
    ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x3], #64

    ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x1], #64
    ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64

    add v4.8H, v4.8H, v16.8H
    add v5.8H, v5.8H, v17.8H
    ld1 {v16.8H, v17.8H}, [x2], #32
    add v6.8H, v6.8H, v18.8H
    add v7.8H, v7.8H, v19.8H
    ld1 {v18.8H, v19.8H}, [x2], #32

    add v20.8H, v20.8H, v24.8H
    add v21.8H, v21.8H, v25.8H
    ld1 {v24.8H, v25.8H}, [x2], #32
    add v22.8H, v22.8H, v26.8H
    add v23.8H, v23.8H, v27.8H
    ld1 {v26.8H, v27.8H}, [x2], #32

    add v4.8H, v4.8H, v16.8H
    add v5.8H, v5.8H, v17.8H
    add v6.8H, v6.8H, v18.8H
    add v7.8H, v7.8H, v19.8H

    add v20.8H, v20.8H, v24.8H
    add v21.8H, v21.8H, v25.8H
    add v22.8H, v22.8H, v26.8H
    add v23.8H, v23.8H, v27.8H

    oo_barrett v4, v5, v6, v7, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v1, #11, v0

    sub x15, x15, #1
    cbnz x15, _add_add_reduce_loop

    st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
    st1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x0], #64

    ret
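
/*
 * Summary (a reading of the code above, not authoritative): each routine
 * walks a 256-coefficient polynomial of 16-bit values in four 64-coefficient
 * blocks (one block in the prologue, three loop iterations, and the stores
 * of the last block after the loop), computing coefficient-wise
 *
 *   _asm_add_reduce(des, src):            des[i] = barrett(des[i] + src[i])
 *   _asm_sub_reduce(des, src):            des[i] = barrett(des[i] - src[i])
 *   _asm_add_add_reduce(des, src1, src2): des[i] = barrett(des[i] + src1[i] + src2[i])
 *
 * where barrett() is the oo_barrett macro applied with the modulus q = 3329
 * (dup'd into v0), the multiplier 25519 (dup'd into v1), and shift #11.
 */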
mktmansour/MKT-KSA-Geolocation-Security
76,935
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896f/avx2/vec256_ama_asm.S
#include "namespace.h" #define vec256_ama_asm CRYPTO_NAMESPACE(vec256_ama_asm) #define _vec256_ama_asm _CRYPTO_NAMESPACE(vec256_ama_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_ama_asm .p2align 5 .global _vec256_ama_asm .global vec256_ama_asm _vec256_ama_asm: vec256_ama_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>a12=reg256#2 # asm 2: vmovupd 384(<input_0=%rdi),>a12=%ymm1 vmovupd 384( % rdi), % ymm1 # qhasm: a12 = a12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<a12=reg256#2,>a12=reg256#2 # asm 2: vpxor 384(<input_1=%rsi),<a12=%ymm1,>a12=%ymm1 vpxor 384( % rsi), % ymm1, % ymm1 # qhasm: mem256[ input_0 + 384 ] = a12 # asm 1: vmovupd <a12=reg256#2,384(<input_0=int64#1) # asm 2: vmovupd <a12=%ymm1,384(<input_0=%rdi) vmovupd % ymm1, 384( % rdi) # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # 
asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 = a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>a11=reg256#15 # asm 2: vmovupd 352(<input_0=%rdi),>a11=%ymm14 vmovupd 352( % rdi), % ymm14 # qhasm: a11 = a11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<a11=reg256#15,>a11=reg256#15 # asm 2: vpxor 352(<input_1=%rsi),<a11=%ymm14,>a11=%ymm14 vpxor 352( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 352 ] = a11 # asm 1: vmovupd <a11=reg256#15,352(<input_0=int64#1) # asm 2: vmovupd <a11=%ymm14,352(<input_0=%rdi) vmovupd % ymm14, 352( % rdi) # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % 
ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>a10=reg256#15 # asm 2: vmovupd 320(<input_0=%rdi),>a10=%ymm14 vmovupd 320( % rdi), % ymm14 # qhasm: a10 = a10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<a10=reg256#15,>a10=reg256#15 # asm 2: vpxor 320(<input_1=%rsi),<a10=%ymm14,>a10=%ymm14 vpxor 320( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 320 ] = a10 # asm 1: vmovupd <a10=reg256#15,320(<input_0=int64#1) # asm 2: vmovupd <a10=%ymm14,320(<input_0=%rdi) vmovupd % ymm14, 320( % rdi) # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>a9=reg256#15 # asm 2: vmovupd 288(<input_0=%rdi),>a9=%ymm14 vmovupd 288( % rdi), % ymm14 # qhasm: a9 = a9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<a9=reg256#15,>a9=reg256#15 # asm 2: vpxor 288(<input_1=%rsi),<a9=%ymm14,>a9=%ymm14 vpxor 288( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 288 ] = a9 # asm 1: vmovupd <a9=reg256#15,288(<input_0=int64#1) # asm 2: vmovupd <a9=%ymm14,288(<input_0=%rdi) vmovupd % ymm14, 288( % rdi) # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & 
mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>a8=reg256#15 # asm 2: vmovupd 256(<input_0=%rdi),>a8=%ymm14 vmovupd 256( % rdi), % ymm14 # qhasm: a8 = a8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<a8=reg256#15,>a8=reg256#15 # asm 2: vpxor 256(<input_1=%rsi),<a8=%ymm14,>a8=%ymm14 vpxor 256( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 256 ] = a8 # asm 1: vmovupd <a8=reg256#15,256(<input_0=int64#1) # asm 2: vmovupd <a8=%ymm14,256(<input_0=%rdi) vmovupd % ymm14, 256( % rdi) # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 
192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>a7=reg256#15 # asm 2: vmovupd 224(<input_0=%rdi),>a7=%ymm14 vmovupd 224( % rdi), % ymm14 # qhasm: a7 = a7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 224(<input_1=int64#2),<a7=reg256#15,>a7=reg256#15 # asm 2: vpxor 224(<input_1=%rsi),<a7=%ymm14,>a7=%ymm14 vpxor 224( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 224 ] = a7 # asm 1: vmovupd <a7=reg256#15,224(<input_0=int64#1) # asm 2: vmovupd <a7=%ymm14,224(<input_0=%rdi) vmovupd % ymm14, 224( % rdi) # qhasm: r = a7 & b0 # asm 1: vpand 
<a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % 
rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>a6=reg256#15 # asm 2: vmovupd 192(<input_0=%rdi),>a6=%ymm14 vmovupd 192( % rdi), % ymm14 # qhasm: a6 = a6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<a6=reg256#15,>a6=reg256#15 # asm 2: vpxor 192(<input_1=%rsi),<a6=%ymm14,>a6=%ymm14 vpxor 192( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 192 ] = a6 # asm 1: vmovupd <a6=reg256#15,192(<input_0=int64#1) # asm 2: vmovupd <a6=%ymm14,192(<input_0=%rdi) vmovupd % ymm14, 192( % rdi) # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: 
vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_0 + 160 ] # asm 1: vmovupd 
160(<input_0=int64#1),>a5=reg256#15 # asm 2: vmovupd 160(<input_0=%rdi),>a5=%ymm14 vmovupd 160( % rdi), % ymm14 # qhasm: a5 = a5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<a5=reg256#15,>a5=reg256#15 # asm 2: vpxor 160(<input_1=%rsi),<a5=%ymm14,>a5=%ymm14 vpxor 160( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 160 ] = a5 # asm 1: vmovupd <a5=reg256#15,160(<input_0=int64#1) # asm 2: vmovupd <a5=%ymm14,160(<input_0=%rdi) vmovupd % ymm14, 160( % rdi) # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = 
a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>a4=reg256#15 # asm 2: vmovupd 128(<input_0=%rdi),>a4=%ymm14 vmovupd 128( % rdi), % ymm14 # qhasm: a4 = a4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<a4=reg256#15,>a4=reg256#15 # asm 2: vpxor 128(<input_1=%rsi),<a4=%ymm14,>a4=%ymm14 vpxor 128( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 128 ] = a4 # asm 1: vmovupd <a4=reg256#15,128(<input_0=int64#1) # asm 2: vmovupd <a4=%ymm14,128(<input_0=%rdi) vmovupd % ymm14, 128( % rdi) # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % 
ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>a3=reg256#15 # asm 2: vmovupd 96(<input_0=%rdi),>a3=%ymm14 vmovupd 96( % rdi), % ymm14 # qhasm: a3 = a3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<a3=reg256#15,>a3=reg256#15 # asm 2: vpxor 96(<input_1=%rsi),<a3=%ymm14,>a3=%ymm14 vpxor 96( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 96 ] = a3 # asm 1: vmovupd <a3=reg256#15,96(<input_0=int64#1) # asm 2: vmovupd <a3=%ymm14,96(<input_0=%rdi) vmovupd % ymm14, 96( % rdi) # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor 
<r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>a2=reg256#15 # asm 2: vmovupd 64(<input_0=%rdi),>a2=%ymm14 vmovupd 64( % rdi), % ymm14 # qhasm: a2 = a2 ^ mem256[ input_1 + 64 ] # asm 1: vpxor 64(<input_1=int64#2),<a2=reg256#15,>a2=reg256#15 # asm 2: vpxor 64(<input_1=%rsi),<a2=%ymm14,>a2=%ymm14 vpxor 64( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 64 ] = a2 # asm 1: vmovupd <a2=reg256#15,64(<input_0=int64#1) # asm 2: vmovupd <a2=%ymm14,64(<input_0=%rdi) vmovupd % ymm14, 64( % rdi) # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 
vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 
384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>a1=reg256#15 # asm 2: vmovupd 32(<input_0=%rdi),>a1=%ymm14 vmovupd 32( % rdi), % ymm14 # qhasm: a1 = a1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<a1=reg256#15,>a1=reg256#15 # asm 2: vpxor 32(<input_1=%rsi),<a1=%ymm14,>a1=%ymm14 vpxor 32( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 32 ] = a1 # asm 1: vmovupd <a1=reg256#15,32(<input_0=int64#1) # asm 2: vmovupd <a1=%ymm14,32(<input_0=%rdi) vmovupd % ymm14, 32( % rdi) # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), 
% ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>a0=reg256#15 # asm 2: vmovupd 0(<input_0=%rdi),>a0=%ymm14 vmovupd 0( % rdi), % ymm14 # qhasm: a0 = a0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<a0=reg256#15,>a0=reg256#15 # asm 2: vpxor 0(<input_1=%rsi),<a0=%ymm14,>a0=%ymm14 vpxor 0( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 0 ] = a0 # asm 1: vmovupd <a0=reg256#15,0(<input_0=int64#1) # asm 2: vmovupd <a0=%ymm14,0(<input_0=%rdi) vmovupd % ymm14, 0( % rdi) # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # 
asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: 
vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 352(%rdx),%ymm14,%ymm0

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1
vpxor %ymm0,%ymm1,%ymm1

# qhasm: r = a0 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 384(%rdx),%ymm14,%ymm0

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2
vpxor %ymm0,%ymm2,%ymm2

# qhasm: r12 = r12 ^ mem256[ input_1 + 384 ]
# asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#3,>r12=reg256#1
# asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm2,>r12=%ymm0
vpxor 384(%rsi),%ymm2,%ymm0

# qhasm: mem256[ input_1 + 384 ] = r12
# asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2)
# asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi)
vmovupd %ymm0,384(%rsi)

# qhasm: r11 = r11 ^ mem256[ input_1 + 352 ]
# asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#2,>r11=reg256#1
# asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm1,>r11=%ymm0
vpxor 352(%rsi),%ymm1,%ymm0

# qhasm: mem256[ input_1 + 352 ] = r11
# asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2)
# asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi)
vmovupd %ymm0,352(%rsi)

# qhasm: r10 = r10 ^ mem256[ input_1 + 320 ]
# asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#14,>r10=reg256#1
# asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm13,>r10=%ymm0
vpxor 320(%rsi),%ymm13,%ymm0

# qhasm: mem256[ input_1 + 320 ] = r10
# asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2)
# asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi)
vmovupd %ymm0,320(%rsi)

# qhasm: r9 = r9 ^ mem256[ input_1 + 288 ]
# asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#13,>r9=reg256#1
# asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm12,>r9=%ymm0
vpxor 288(%rsi),%ymm12,%ymm0

# qhasm: mem256[ input_1 + 288 ] = r9
# asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2)
# asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi)
vmovupd %ymm0,288(%rsi)

# qhasm: r8 = r8 ^ mem256[ input_1 + 256 ]
# asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#12,>r8=reg256#1
# asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm11,>r8=%ymm0
vpxor 256(%rsi),%ymm11,%ymm0

# qhasm: mem256[ input_1 + 256 ] = r8
# asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2)
# asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi)
vmovupd %ymm0,256(%rsi)

# qhasm: r7 = r7 ^ mem256[ input_1 + 224 ]
# asm 1: vpxor 224(<input_1=int64#2),<r7=reg256#11,>r7=reg256#1
# asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm10,>r7=%ymm0
vpxor 224(%rsi),%ymm10,%ymm0

# qhasm: mem256[ input_1 + 224 ] = r7
# asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2)
# asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi)
vmovupd %ymm0,224(%rsi)

# qhasm: r6 = r6 ^ mem256[ input_1 + 192 ]
# asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#10,>r6=reg256#1
# asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm9,>r6=%ymm0
vpxor 192(%rsi),%ymm9,%ymm0

# qhasm: mem256[ input_1 + 192 ] = r6
# asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2)
# asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi)
vmovupd %ymm0,192(%rsi)

# qhasm: r5 = r5 ^ mem256[ input_1 + 160 ]
# asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#9,>r5=reg256#1
# asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm8,>r5=%ymm0
vpxor 160(%rsi),%ymm8,%ymm0

# qhasm: mem256[ input_1 + 160 ] = r5
# asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2)
# asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi)
vmovupd %ymm0,160(%rsi)

# qhasm: r4 = r4 ^ mem256[ input_1 + 128 ]
# asm 1: vpxor 128(<input_1=int64#2),<r4=reg256#8,>r4=reg256#1
# asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm7,>r4=%ymm0
vpxor 128(%rsi),%ymm7,%ymm0

# qhasm: mem256[ input_1 + 128 ] = r4
# asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2)
# asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi)
vmovupd %ymm0,128(%rsi)

# qhasm: r3 = r3 ^ mem256[ input_1 + 96 ]
# asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#7,>r3=reg256#1
# asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm6,>r3=%ymm0
vpxor 96(%rsi),%ymm6,%ymm0

# qhasm: mem256[ input_1 + 96 ] = r3
# asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2)
# asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi)
vmovupd %ymm0,96(%rsi)

# qhasm: r2 = r2 ^ mem256[ input_1 + 64 ]
# asm 1: vpxor 64(<input_1=int64#2),<r2=reg256#6,>r2=reg256#1
# asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm5,>r2=%ymm0
vpxor 64(%rsi),%ymm5,%ymm0

# qhasm: mem256[ input_1 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2)
# asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi)
vmovupd %ymm0,64(%rsi)

# qhasm: r1 = r1 ^ mem256[ input_1 + 32 ]
# asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#5,>r1=reg256#1
# asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm4,>r1=%ymm0
vpxor 32(%rsi),%ymm4,%ymm0

# qhasm: mem256[ input_1 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2)
# asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi)
vmovupd %ymm0,32(%rsi)

# qhasm: r0 = r0 ^ mem256[ input_1 + 0 ]
# asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#4,>r0=reg256#1
# asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm3,>r0=%ymm0
vpxor 0(%rsi),%ymm3,%ymm0

# qhasm: mem256[ input_1 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2)
# asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi)
vmovupd %ymm0,0(%rsi)

# qhasm: return
add %r11,%rsp
ret
mktmansour/MKT-KSA-Geolocation-Security
262,634
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896f/avx2/transpose_64x64_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x64_asm CRYPTO_NAMESPACE(transpose_64x64_asm) #define _transpose_64x64_asm _CRYPTO_NAMESPACE(transpose_64x64_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 r0 # qhasm: reg128 r1 # qhasm: reg128 r2 # qhasm: reg128 r3 # qhasm: reg128 r4 # qhasm: reg128 r5 # qhasm: reg128 r6 # qhasm: reg128 r7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: int64 buf # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x64_asm .p2align 5 .global _transpose_64x64_asm .global transpose_64x64_asm _transpose_64x64_asm: transpose_64x64_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 64 ] x2 # asm 1: movddup 64(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 64(<input_0=%rdi),>r1=%xmm7 movddup 64( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 
128(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 128(<input_0=%rdi),>r2=%xmm8
movddup 128(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 192 ] x2
# asm 1: movddup 192(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 192(<input_0=%rdi),>r3=%xmm9
movddup 192(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 256 ] x2
# asm 1: movddup 256(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 256(<input_0=%rdi),>r4=%xmm10
movddup 256(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 320 ] x2
# asm 1: movddup 320(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 320(<input_0=%rdi),>r5=%xmm11
movddup 320(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 384 ] x2
# asm 1: movddup 384(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 384(<input_0=%rdi),>r6=%xmm12
movddup 384(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 448 ] x2
# asm 1: movddup 448(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 448(<input_0=%rdi),>r7=%xmm13
movddup 448(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 0 ] = buf
# asm 1: movq <buf=int64#2,0(<input_0=int64#1)
# asm 2: movq <buf=%rsi,0(<input_0=%rdi)
movq %rsi,0(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi

# qhasm: mem64[ input_0 + 64 ] = buf
# asm 1: movq <buf=int64#2,64(<input_0=int64#1)
# asm 2: movq <buf=%rsi,64(<input_0=%rdi)
movq %rsi,64(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi

# qhasm: mem64[ input_0 + 128 ] = buf
# asm 1: movq <buf=int64#2,128(<input_0=int64#1)
# asm 2: movq <buf=%rsi,128(<input_0=%rdi)
movq %rsi,128(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi

# qhasm: mem64[ input_0 + 192 ] = buf
# asm 1: movq <buf=int64#2,192(<input_0=int64#1)
# asm 2: movq <buf=%rsi,192(<input_0=%rdi)
movq %rsi,192(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi

# qhasm: mem64[ input_0 + 256 ] = buf
# asm 1: movq <buf=int64#2,256(<input_0=int64#1)
# asm 2: movq <buf=%rsi,256(<input_0=%rdi)
movq %rsi,256(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 320 ] = buf
# asm 1: movq <buf=int64#2,320(<input_0=int64#1)
# asm 2: movq <buf=%rsi,320(<input_0=%rdi)
movq %rsi,320(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi

# qhasm: mem64[ input_0 + 384 ] = buf
# asm 1: movq <buf=int64#2,384(<input_0=int64#1)
# asm 2: movq <buf=%rsi,384(<input_0=%rdi)
movq %rsi,384(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi

# qhasm: mem64[ input_0 + 448 ] = buf
# asm 1: movq <buf=int64#2,448(<input_0=int64#1)
# asm 2: movq <buf=%rsi,448(<input_0=%rdi)
movq %rsi,448(%rdi)

# qhasm: r0 = mem64[ input_0 + 8 ] x2
# asm 1: movddup 8(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 8(<input_0=%rdi),>r0=%xmm6
movddup 8(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 72 ] x2
# asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7
movddup 72(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 136 ] x2
# asm 1: movddup 136(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 136(<input_0=%rdi),>r2=%xmm8
movddup 136(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 200 ] x2
# asm 1: movddup 200(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 200(<input_0=%rdi),>r3=%xmm9
movddup 200(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 264 ] x2
# asm 1: movddup 264(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 264(<input_0=%rdi),>r4=%xmm10
movddup 264(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 328 ] x2
# asm 1: movddup 328(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 328(<input_0=%rdi),>r5=%xmm11
movddup 328(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 392 ] x2
# asm 1: movddup 392(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 392(<input_0=%rdi),>r6=%xmm12
movddup 392(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 456 ] x2
# asm 1: movddup 456(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 456(<input_0=%rdi),>r7=%xmm13
movddup 456(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5
= v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 
vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 8 ] = buf # asm 1: movq <buf=int64#2,8(<input_0=int64#1) # asm 2: movq <buf=%rsi,8(<input_0=%rdi) movq % rsi, 8( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 72 ] = buf # asm 1: movq <buf=int64#2,72(<input_0=int64#1) # asm 2: movq <buf=%rsi,72(<input_0=%rdi) movq % rsi, 72( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 136 ] = buf # asm 1: movq <buf=int64#2,136(<input_0=int64#1) # asm 2: movq <buf=%rsi,136(<input_0=%rdi) movq % rsi, 136( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: 
mem64[ input_0 + 200 ] = buf # asm 1: movq <buf=int64#2,200(<input_0=int64#1) # asm 2: movq <buf=%rsi,200(<input_0=%rdi) movq % rsi, 200( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 264 ] = buf # asm 1: movq <buf=int64#2,264(<input_0=int64#1) # asm 2: movq <buf=%rsi,264(<input_0=%rdi) movq % rsi, 264( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 328 ] = buf # asm 1: movq <buf=int64#2,328(<input_0=int64#1) # asm 2: movq <buf=%rsi,328(<input_0=%rdi) movq % rsi, 328( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 392 ] = buf # asm 1: movq <buf=int64#2,392(<input_0=int64#1) # asm 2: movq <buf=%rsi,392(<input_0=%rdi) movq % rsi, 392( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 456 ] = buf # asm 1: movq <buf=int64#2,456(<input_0=int64#1) # asm 2: movq <buf=%rsi,456(<input_0=%rdi) movq % rsi, 456( % rdi) # qhasm: r0 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 16(<input_0=%rdi),>r0=%xmm6 movddup 16( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 80 ] x2 # asm 1: movddup 80(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 80(<input_0=%rdi),>r1=%xmm7 movddup 80( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 208(<input_0=%rdi),>r3=%xmm9 movddup 208( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 272(<input_0=%rdi),>r4=%xmm10 movddup 272( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 336(<input_0=%rdi),>r5=%xmm11 movddup 336( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 400(<input_0=%rdi),>r6=%xmm12 movddup 400( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 464(<input_0=%rdi),>r7=%xmm13 movddup 464( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor 
<v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 
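# Annotation (not part of the generated qhasm output): each
# six-instruction group here follows the same butterfly pattern. vpand
# keeps the field that stays in place (v00/v11), the paired shift
# (vpsllq/vpsrlq for 32-bit fields, vpslld/vpsrld for 16-bit,
# vpsllw/vpsrlw for 8-bit) moves the field exchanged with the partner
# row (v10/v01), and the two vpor instructions recombine both halves.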
# qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 
2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 16 ] = buf # 
asm 1: movq <buf=int64#2,16(<input_0=int64#1) # asm 2: movq <buf=%rsi,16(<input_0=%rdi) movq % rsi, 16( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 80 ] = buf # asm 1: movq <buf=int64#2,80(<input_0=int64#1) # asm 2: movq <buf=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 144 ] = buf # asm 1: movq <buf=int64#2,144(<input_0=int64#1) # asm 2: movq <buf=%rsi,144(<input_0=%rdi) movq % rsi, 144( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 208 ] = buf # asm 1: movq <buf=int64#2,208(<input_0=int64#1) # asm 2: movq <buf=%rsi,208(<input_0=%rdi) movq % rsi, 208( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 272 ] = buf # asm 1: movq <buf=int64#2,272(<input_0=int64#1) # asm 2: movq <buf=%rsi,272(<input_0=%rdi) movq % rsi, 272( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 336 ] = buf # asm 1: movq <buf=int64#2,336(<input_0=int64#1) # asm 2: movq <buf=%rsi,336(<input_0=%rdi) movq % rsi, 336( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 400 ] = buf # asm 1: movq <buf=int64#2,400(<input_0=int64#1) # asm 2: movq <buf=%rsi,400(<input_0=%rdi) movq % rsi, 400( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 464 ] = buf # asm 1: movq <buf=int64#2,464(<input_0=int64#1) # asm 2: movq <buf=%rsi,464(<input_0=%rdi) movq % rsi, 464( % rdi) # qhasm: r0 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 24(<input_0=%rdi),>r0=%xmm6 movddup 24( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 88 ] x2 # asm 1: movddup 88(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 88(<input_0=%rdi),>r1=%xmm7 movddup 88( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 152(<input_0=%rdi),>r2=%xmm8 movddup 152( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 280(<input_0=%rdi),>r4=%xmm10 movddup 280( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 344(<input_0=%rdi),>r5=%xmm11 movddup 344( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 408(<input_0=%rdi),>r6=%xmm12 movddup 408( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 472(<input_0=%rdi),>r7=%xmm13 movddup 472( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # 
qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw 
$8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 24 ] = buf # asm 1: movq <buf=int64#2,24(<input_0=int64#1) # asm 2: movq <buf=%rsi,24(<input_0=%rdi) movq % rsi, 24( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 88 ] = buf # asm 1: movq <buf=int64#2,88(<input_0=int64#1) # asm 2: movq <buf=%rsi,88(<input_0=%rdi) movq % rsi, 88( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 152 ] = buf # asm 1: movq <buf=int64#2,152(<input_0=int64#1) # asm 2: movq <buf=%rsi,152(<input_0=%rdi) movq % rsi, 152( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 216 ] = buf # asm 1: movq <buf=int64#2,216(<input_0=int64#1) # asm 2: movq <buf=%rsi,216(<input_0=%rdi) movq % rsi, 216( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 280 ] = buf # asm 1: movq <buf=int64#2,280(<input_0=int64#1) # asm 2: movq <buf=%rsi,280(<input_0=%rdi) movq % rsi, 280( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 344 ] = buf # asm 1: movq <buf=int64#2,344(<input_0=int64#1) # asm 2: movq <buf=%rsi,344(<input_0=%rdi) movq % rsi, 344( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 408 ] = buf # asm 1: movq <buf=int64#2,408(<input_0=int64#1) # asm 2: movq <buf=%rsi,408(<input_0=%rdi) movq % rsi, 408( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 472 ] = buf # asm 1: movq <buf=int64#2,472(<input_0=int64#1) # asm 2: movq <buf=%rsi,472(<input_0=%rdi) movq % rsi, 472( % rdi) # qhasm: r0 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 32(<input_0=%rdi),>r0=%xmm6 movddup 32( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 96 ] x2 # asm 1: movddup 96(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 96(<input_0=%rdi),>r1=%xmm7 movddup 96( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 160(<input_0=%rdi),>r2=%xmm8 movddup 160( % rdi), % xmm8 # qhasm: r3 = 
mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 224(<input_0=%rdi),>r3=%xmm9 movddup 224( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 352(<input_0=%rdi),>r5=%xmm11 movddup 352( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 416(<input_0=%rdi),>r6=%xmm12 movddup 416( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 480(<input_0=%rdi),>r7=%xmm13 movddup 480( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: 
vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 
2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 
unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 32 ] = buf # asm 1: movq <buf=int64#2,32(<input_0=int64#1) # asm 2: movq <buf=%rsi,32(<input_0=%rdi) movq % rsi, 32( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 96 ] = buf # asm 1: movq <buf=int64#2,96(<input_0=int64#1) # asm 2: movq <buf=%rsi,96(<input_0=%rdi) movq % rsi, 96( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 160 ] = buf # asm 1: movq <buf=int64#2,160(<input_0=int64#1) # asm 2: movq <buf=%rsi,160(<input_0=%rdi) movq % rsi, 160( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 224 ] = buf # asm 1: movq <buf=int64#2,224(<input_0=int64#1) # asm 2: movq <buf=%rsi,224(<input_0=%rdi) movq % rsi, 224( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 288 ] = buf # asm 1: movq <buf=int64#2,288(<input_0=int64#1) # asm 2: movq <buf=%rsi,288(<input_0=%rdi) movq % rsi, 288( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 352 ] = buf # asm 1: movq <buf=int64#2,352(<input_0=int64#1) # asm 2: movq <buf=%rsi,352(<input_0=%rdi) movq % rsi, 352( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 416 ] = buf # asm 1: movq <buf=int64#2,416(<input_0=int64#1) # asm 2: movq <buf=%rsi,416(<input_0=%rdi) 
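# Annotation (not part of the generated qhasm output): once the eight
# rows of this column are stored (offsets 32 through 480 at a 64-byte
# stride), the same load/transpose/store sequence repeats for the
# remaining 8-byte columns of the 512-byte block.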
movq % rsi, 416( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 480 ] = buf # asm 1: movq <buf=int64#2,480(<input_0=int64#1) # asm 2: movq <buf=%rsi,480(<input_0=%rdi) movq % rsi, 480( % rdi) # qhasm: r0 = mem64[ input_0 + 40 ] x2 # asm 1: movddup 40(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 40(<input_0=%rdi),>r0=%xmm6 movddup 40( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 104 ] x2 # asm 1: movddup 104(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 104(<input_0=%rdi),>r1=%xmm7 movddup 104( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 168 ] x2 # asm 1: movddup 168(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 168(<input_0=%rdi),>r2=%xmm8 movddup 168( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 232 ] x2 # asm 1: movddup 232(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 232(<input_0=%rdi),>r3=%xmm9 movddup 232( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 296 ] x2 # asm 1: movddup 296(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 296(<input_0=%rdi),>r4=%xmm10 movddup 296( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 360 ] x2 # asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11 movddup 360( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 424 ] x2 # asm 1: movddup 424(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 424(<input_0=%rdi),>r6=%xmm12 movddup 424( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 488 ] x2 # asm 1: movddup 488(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 488(<input_0=%rdi),>r7=%xmm13 movddup 488( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # 
qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % 
xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 40 ] = buf # asm 1: movq <buf=int64#2,40(<input_0=int64#1) # asm 2: movq <buf=%rsi,40(<input_0=%rdi) movq % rsi, 40( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 104 ] = buf # asm 1: movq <buf=int64#2,104(<input_0=int64#1) # asm 2: movq <buf=%rsi,104(<input_0=%rdi) movq % rsi, 104( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 168 ] = buf # asm 1: movq <buf=int64#2,168(<input_0=int64#1) # asm 2: movq <buf=%rsi,168(<input_0=%rdi) movq % rsi, 168( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 232 ] = buf # asm 1: movq <buf=int64#2,232(<input_0=int64#1) # asm 2: movq <buf=%rsi,232(<input_0=%rdi) movq % rsi, 232( % rdi) # qhasm: 
buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 296 ] = buf # asm 1: movq <buf=int64#2,296(<input_0=int64#1) # asm 2: movq <buf=%rsi,296(<input_0=%rdi) movq % rsi, 296( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 360 ] = buf # asm 1: movq <buf=int64#2,360(<input_0=int64#1) # asm 2: movq <buf=%rsi,360(<input_0=%rdi) movq % rsi, 360( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 424 ] = buf # asm 1: movq <buf=int64#2,424(<input_0=int64#1) # asm 2: movq <buf=%rsi,424(<input_0=%rdi) movq % rsi, 424( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 488 ] = buf # asm 1: movq <buf=int64#2,488(<input_0=int64#1) # asm 2: movq <buf=%rsi,488(<input_0=%rdi) movq % rsi, 488( % rdi) # qhasm: r0 = mem64[ input_0 + 48 ] x2 # asm 1: movddup 48(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 48(<input_0=%rdi),>r0=%xmm6 movddup 48( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 112 ] x2 # asm 1: movddup 112(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 112(<input_0=%rdi),>r1=%xmm7 movddup 112( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 176 ] x2 # asm 1: movddup 176(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 176(<input_0=%rdi),>r2=%xmm8 movddup 176( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 240 ] x2 # asm 1: movddup 240(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 240(<input_0=%rdi),>r3=%xmm9 movddup 240( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 304 ] x2 # asm 1: movddup 304(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 304(<input_0=%rdi),>r4=%xmm10 movddup 304( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 368 ] x2 # asm 1: movddup 368(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 368(<input_0=%rdi),>r5=%xmm11 movddup 368( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 432 ] x2 # asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12 movddup 432( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 496 ] x2 # asm 1: movddup 496(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 496(<input_0=%rdi),>r7=%xmm13 movddup 496( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand 
<mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 
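# The stretches above and below are rounds of a bit-matrix transpose
# (the classic masked-swap network): each round pairs row r(i) with
# row r(i+d) and exchanges s-bit fields between them through the mask
# pairs held in (mask0,mask1), (mask2,mask3), (mask4,mask5) -- here
# (s,d) = (32,4), (16,2), (8,1), then (4,4), (2,2), (1,1) once the
# masks are reloaded from MASK2_*/MASK1_*/MASK0_* further down.
# A C-style sketch of one step at width s, per 64-bit lane (a, b and
# v00..v11 are illustrative names mirroring the qhasm temporaries):
#
#     uint64_t v00 = a & mask_lo;   /* low  s-bit fields of a stay */
#     uint64_t v10 = b << s;        /* low  fields of b move up    */
#     uint64_t v01 = a >> s;        /* high fields of a move down  */
#     uint64_t v11 = b & mask_hi;   /* high s-bit fields of b stay */
#     a = v00 | v10;
#     b = v01 | v11;
#
# For the interleaved masks used after the reload (s = 4, 2, 1), b is
# masked with mask_lo before the left shift and a with mask_hi before
# the right shift, but the exchange is otherwise identical.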
# qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: 
vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 48 ] = buf # asm 1: movq <buf=int64#2,48(<input_0=int64#1) # asm 2: movq <buf=%rsi,48(<input_0=%rdi) movq % rsi, 48( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq 
$0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 112 ] = buf # asm 1: movq <buf=int64#2,112(<input_0=int64#1) # asm 2: movq <buf=%rsi,112(<input_0=%rdi) movq % rsi, 112( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 176 ] = buf # asm 1: movq <buf=int64#2,176(<input_0=int64#1) # asm 2: movq <buf=%rsi,176(<input_0=%rdi) movq % rsi, 176( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 240 ] = buf # asm 1: movq <buf=int64#2,240(<input_0=int64#1) # asm 2: movq <buf=%rsi,240(<input_0=%rdi) movq % rsi, 240( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 304 ] = buf # asm 1: movq <buf=int64#2,304(<input_0=int64#1) # asm 2: movq <buf=%rsi,304(<input_0=%rdi) movq % rsi, 304( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 368 ] = buf # asm 1: movq <buf=int64#2,368(<input_0=int64#1) # asm 2: movq <buf=%rsi,368(<input_0=%rdi) movq % rsi, 368( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 432 ] = buf # asm 1: movq <buf=int64#2,432(<input_0=int64#1) # asm 2: movq <buf=%rsi,432(<input_0=%rdi) movq % rsi, 432( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 496 ] = buf # asm 1: movq <buf=int64#2,496(<input_0=int64#1) # asm 2: movq <buf=%rsi,496(<input_0=%rdi) movq % rsi, 496( % rdi) # qhasm: r0 = mem64[ input_0 + 56 ] x2 # asm 1: movddup 56(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 56(<input_0=%rdi),>r0=%xmm6 movddup 56( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 120 ] x2 # asm 1: movddup 120(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 120(<input_0=%rdi),>r1=%xmm7 movddup 120( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 184 ] x2 # asm 1: movddup 184(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 184(<input_0=%rdi),>r2=%xmm8 movddup 184( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 248 ] x2 # asm 1: movddup 248(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 248(<input_0=%rdi),>r3=%xmm9 movddup 248( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 312 ] x2 # asm 1: movddup 312(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 312(<input_0=%rdi),>r4=%xmm10 movddup 312( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 376 ] x2 # asm 1: movddup 376(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 376(<input_0=%rdi),>r5=%xmm11 movddup 376( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 440 ] x2 # asm 1: movddup 440(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 440(<input_0=%rdi),>r6=%xmm12 movddup 440( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 504 ] x2 # asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13 movddup 504( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq 
$32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#1 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm0 vpand % xmm0, % xmm9, % xmm0 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#13 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm12 vpsllq $32, % xmm13, % xmm12 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#1,>r3=reg128#1 # asm 2: vpor <v10=%xmm12,<v00=%xmm0,>r3=%xmm0 vpor % xmm12, % xmm0, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor 
% xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#13 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm12 vpslld $16, % xmm11, % xmm12 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#14 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm13 vpsrld $16, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#1,>v10=reg128#14 # asm 2: vpslld $16,<r3=%xmm0,>v10=%xmm13 vpslld $16, % xmm0, % xmm13 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#14 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm13 vpslld $16, % xmm8, % xmm13 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#3 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm2 vpand % xmm2, % xmm7, % xmm2 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#2,>v10=reg128#9 # asm 2: vpslld $16,<r7=%xmm1,>v10=%xmm8 vpslld $16, % xmm1, % xmm8 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: r5 = v00 | v10 # asm 1: vpor 
<v10=reg128#9,<v00=reg128#3,>r5=reg128#3 # asm 2: vpor <v10=%xmm8,<v00=%xmm2,>r5=%xmm2 vpor % xmm8, % xmm2, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#13,>v10=reg128#8 # asm 2: vpsllw $8,<r1=%xmm12,>v10=%xmm7 vpsllw $8, % xmm12, % xmm7 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#10,>v01=reg128#9 # asm 2: vpsrlw $8,<r0=%xmm9,>v01=%xmm8 vpsrlw $8, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#1,>v10=reg128#10 # asm 2: vpsllw $8,<r3=%xmm0,>v10=%xmm9 vpsllw $8, % xmm0, % xmm9 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#3,>v10=reg128#12 # asm 2: vpsllw $8,<r5=%xmm2,>v10=%xmm11 vpsllw $8, % xmm2, % xmm11 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#11,>v01=reg128#11 # asm 2: vpsrlw $8,<r4=%xmm10,>v01=%xmm10 vpsrlw $8, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#5 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm4 vpand % xmm4, % xmm6, % xmm4 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#2,>v10=reg128#11 # asm 2: vpsllw $8,<r7=%xmm1,>v10=%xmm10 vpsllw $8, % xmm1, % xmm10 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand 
<mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#11,<v00=reg128#5,>r6=reg128#5 # asm 2: vpor <v10=%xmm10,<v00=%xmm4,>r6=%xmm4 vpor % xmm10, % xmm4, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#4,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm3,>buf=%rsi pextrq $0x0, % xmm3, % rsi # qhasm: mem64[ input_0 + 56 ] = buf # asm 1: movq <buf=int64#2,56(<input_0=int64#1) # asm 2: movq <buf=%rsi,56(<input_0=%rdi) movq % rsi, 56( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#8,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm7,>buf=%rsi pextrq $0x0, % xmm7, % rsi # qhasm: mem64[ input_0 + 120 ] = buf # asm 1: movq <buf=int64#2,120(<input_0=int64#1) # asm 2: movq <buf=%rsi,120(<input_0=%rdi) movq % rsi, 120( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 184 ] = buf # asm 1: movq <buf=int64#2,184(<input_0=int64#1) # asm 2: movq <buf=%rsi,184(<input_0=%rdi) movq % rsi, 184( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#1,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm0,>buf=%rsi pextrq $0x0, % xmm0, % rsi # qhasm: mem64[ input_0 + 248 ] = buf # asm 1: movq <buf=int64#2,248(<input_0=int64#1) # asm 2: movq <buf=%rsi,248(<input_0=%rdi) movq % rsi, 248( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 312 ] = buf # asm 1: movq <buf=int64#2,312(<input_0=int64#1) # asm 2: movq <buf=%rsi,312(<input_0=%rdi) movq % rsi, 312( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#3,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm2,>buf=%rsi pextrq $0x0, % xmm2, % rsi # qhasm: mem64[ input_0 + 376 ] = buf # asm 1: movq <buf=int64#2,376(<input_0=int64#1) # asm 2: movq <buf=%rsi,376(<input_0=%rdi) movq % rsi, 376( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#5,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm4,>buf=%rsi pextrq $0x0, % xmm4, % rsi # qhasm: mem64[ input_0 + 440 ] = buf # asm 1: movq <buf=int64#2,440(<input_0=int64#1) # asm 2: movq <buf=%rsi,440(<input_0=%rdi) movq % rsi, 440( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#2,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm1,>buf=%rsi pextrq $0x0, % xmm1, % rsi # qhasm: mem64[ input_0 + 504 ] = buf # asm 1: movq <buf=int64#2,504(<input_0=int64#1) # asm 2: movq <buf=%rsi,504(<input_0=%rdi) movq % rsi, 504( % rdi) # qhasm: mask0 aligned= mem128[ MASK2_0 ] # asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0 movdqa MASK2_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK2_1 ] # asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1 movdqa MASK2_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK1_0 ] # asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2 movdqa MASK1_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK1_1 ] # asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3 movdqa MASK1_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK0_0 ] # asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4 movdqa MASK0_0( % rip), % 
xmm4 # qhasm: mask5 aligned= mem128[ MASK0_1 ] # asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5 movdqa MASK0_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 8(<input_0=%rdi),>r1=%xmm7 movddup 8( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 16(<input_0=%rdi),>r2=%xmm8 movddup 16( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 24(<input_0=%rdi),>r3=%xmm9 movddup 24( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 32(<input_0=%rdi),>r4=%xmm10 movddup 32( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 40 ] x2 # asm 1: movddup 40(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 40(<input_0=%rdi),>r5=%xmm11 movddup 40( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 48 ] x2 # asm 1: movddup 48(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 48(<input_0=%rdi),>r6=%xmm12 movddup 48( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 56 ] x2 # asm 1: movddup 56(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 56(<input_0=%rdi),>r7=%xmm13 movddup 56( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | 
v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq 
$2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq 
psrlq $2,%xmm7

# qhasm: r5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = r1 & mask4
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r0 & mask5
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm13
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = r3 & mask4
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r2 & mask5
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = r5 & mask4
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r4 & mask5
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm12
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = r7 & mask4
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r6 & mask5
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm6
# qhasm: r6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6

# qhasm: t0 = r0[0]r1[0]
vpunpcklqdq %xmm13,%xmm9,%xmm7
# qhasm: mem128[ input_0 + 0 ] = t0
movdqu %xmm7,0(%rdi)
# qhasm: t0 = r2[0]r3[0]
vpunpcklqdq %xmm10,%xmm14,%xmm7
# qhasm: mem128[ input_0 + 16 ] = t0
movdqu %xmm7,16(%rdi)
# qhasm: t0 = r4[0]r5[0]
vpunpcklqdq %xmm8,%xmm11,%xmm7
# qhasm: mem128[ input_0 + 32 ] = t0
movdqu %xmm7,32(%rdi)
# qhasm: t0 = r6[0]r7[0]
vpunpcklqdq %xmm6,%xmm12,%xmm6
# qhasm: mem128[ input_0 + 48 ] = t0
movdqu %xmm6,48(%rdi)
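# NOTE: the sequence below repeats once per 64-byte block of input_0.
# Each block is loaded into xmm6..xmm13 and sent through a bit-matrix
# transpose butterfly network.  mask0..mask5 (xmm0..xmm5) are assumed
# to be preloaded with alternating-bit constants (presumably
# 0x0F0F.../0xF0F0... for the shift-4 level, 0x3333.../0xCCCC... for
# shift 2, and 0x5555.../0xAAAA... for shift 1, in the style of the
# MASKn_0/MASKn_1 tables in consts.S).  One butterfly step on a pair
# (x, y) with masks (m0, m1) and shift s computes, as a C sketch:
#
#     x' = (x & m0) | ((y & m0) << s);
#     y' = ((x & m1) >> s) | (y & m1);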
# qhasm: r0 = mem64[ input_0 + 64 ] x2
movddup 64(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 72 ] x2
movddup 72(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 80 ] x2
movddup 80(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 88 ] x2
movddup 88(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 96 ] x2
movddup 96(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 104 ] x2
movddup 104(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 112 ] x2
movddup 112(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 120 ] x2
movddup 120(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = r4 & mask0
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r0 & mask1
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm6
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = r5 & mask0
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r1 & mask1
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm7
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = r6 & mask0
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r2 & mask1
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm8
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = r7 & mask0
vpand %xmm0,%xmm13,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r3 & mask1
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1,%xmm13,%xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm9
# qhasm: r3 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
vpand %xmm2,%xmm14,%xmm13
# qhasm: v10 = r2 & mask2
vpand %xmm2,%xmm11,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r0 & mask3
vpand %xmm3,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = r3 & mask2
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r1 & mask3
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm10
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = r6 & mask2
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r4 & mask3
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm6
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = r7 & mask2
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r5 & mask3
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm7
# qhasm: r5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = r1 & mask4
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r0 & mask5
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm13
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = r3 & mask4
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r2 & mask5
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = r5 & mask4
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r4 & mask5
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm12
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = r7 & mask4
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r6 & mask5
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm6
# qhasm: r6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6

# qhasm: t0 = r0[0]r1[0]
vpunpcklqdq %xmm13,%xmm9,%xmm7
# qhasm: mem128[ input_0 + 64 ] = t0
movdqu %xmm7,64(%rdi)
# qhasm: t0 = r2[0]r3[0]
vpunpcklqdq %xmm10,%xmm14,%xmm7
# qhasm: mem128[ input_0 + 80 ] = t0
movdqu %xmm7,80(%rdi)
# qhasm: t0 = r4[0]r5[0]
vpunpcklqdq %xmm8,%xmm11,%xmm7
# qhasm: mem128[ input_0 + 96 ] = t0
movdqu %xmm7,96(%rdi)
# qhasm: t0 = r6[0]r7[0]
vpunpcklqdq %xmm6,%xmm12,%xmm6
# qhasm: mem128[ input_0 + 112 ] = t0
movdqu %xmm6,112(%rdi)
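# Same butterfly network, applied to the next 64-byte block
# (bytes 128..191 of input_0).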
# qhasm: r0 = mem64[ input_0 + 128 ] x2
movddup 128(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 136 ] x2
movddup 136(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 144 ] x2
movddup 144(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 152 ] x2
movddup 152(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 160 ] x2
movddup 160(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 168 ] x2
movddup 168(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 176 ] x2
movddup 176(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 184 ] x2
movddup 184(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = r4 & mask0
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r0 & mask1
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm6
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = r5 & mask0
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r1 & mask1
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm7
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = r6 & mask0
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r2 & mask1
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm8
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = r7 & mask0
vpand %xmm0,%xmm13,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r3 & mask1
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1,%xmm13,%xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm9
# qhasm: r3 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
vpand %xmm2,%xmm14,%xmm13
# qhasm: v10 = r2 & mask2
vpand %xmm2,%xmm11,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r0 & mask3
vpand %xmm3,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = r3 & mask2
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r1 & mask3
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm10
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = r6 & mask2
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r4 & mask3
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm6
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = r7 & mask2
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r5 & mask3
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm7
# qhasm: r5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = r1 & mask4
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r0 & mask5
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm13
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = r3 & mask4
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r2 & mask5
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = r5 & mask4
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r4 & mask5
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm12
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = r7 & mask4
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r6 & mask5
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm6
# qhasm: r6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6

# qhasm: t0 = r0[0]r1[0]
vpunpcklqdq %xmm13,%xmm9,%xmm7
# qhasm: mem128[ input_0 + 128 ] = t0
movdqu %xmm7,128(%rdi)
# qhasm: t0 = r2[0]r3[0]
vpunpcklqdq %xmm10,%xmm14,%xmm7
# qhasm: mem128[ input_0 + 144 ] = t0
movdqu %xmm7,144(%rdi)
# qhasm: t0 = r4[0]r5[0]
vpunpcklqdq %xmm8,%xmm11,%xmm7
# qhasm: mem128[ input_0 + 160 ] = t0
movdqu %xmm7,160(%rdi)
# qhasm: t0 = r6[0]r7[0]
vpunpcklqdq %xmm6,%xmm12,%xmm6
# qhasm: mem128[ input_0 + 176 ] = t0
movdqu %xmm6,176(%rdi)
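# Same butterfly network, applied to the next 64-byte block
# (bytes 192..255 of input_0).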
# qhasm: r0 = mem64[ input_0 + 192 ] x2
movddup 192(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 200 ] x2
movddup 200(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 208 ] x2
movddup 208(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 216 ] x2
movddup 216(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 224 ] x2
movddup 224(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 232 ] x2
movddup 232(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 240 ] x2
movddup 240(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 248 ] x2
movddup 248(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = r4 & mask0
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r0 & mask1
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm6
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = r5 & mask0
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r1 & mask1
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm7
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = r6 & mask0
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r2 & mask1
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm8
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = r7 & mask0
vpand %xmm0,%xmm13,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r3 & mask1
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1,%xmm13,%xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm9
# qhasm: r3 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
vpand %xmm2,%xmm14,%xmm13
# qhasm: v10 = r2 & mask2
vpand %xmm2,%xmm11,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r0 & mask3
vpand %xmm3,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = r3 & mask2
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r1 & mask3
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm10
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = r6 & mask2
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r4 & mask3
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm6
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = r7 & mask2
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r5 & mask3
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm7
# qhasm: r5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = r1 & mask4
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r0 & mask5
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm13
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = r3 & mask4
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r2 & mask5
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = r5 & mask4
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r4 & mask5
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm12
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = r7 & mask4
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r6 & mask5
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm6
# qhasm: r6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6

# qhasm: t0 = r0[0]r1[0]
vpunpcklqdq %xmm13,%xmm9,%xmm7
# qhasm: mem128[ input_0 + 192 ] = t0
movdqu %xmm7,192(%rdi)
# qhasm: t0 = r2[0]r3[0]
vpunpcklqdq %xmm10,%xmm14,%xmm7
# qhasm: mem128[ input_0 + 208 ] = t0
movdqu %xmm7,208(%rdi)
# qhasm: t0 = r4[0]r5[0]
vpunpcklqdq %xmm8,%xmm11,%xmm7
# qhasm: mem128[ input_0 + 224 ] = t0
movdqu %xmm7,224(%rdi)
# qhasm: t0 = r6[0]r7[0]
vpunpcklqdq %xmm6,%xmm12,%xmm6
# qhasm: mem128[ input_0 + 240 ] = t0
movdqu %xmm6,240(%rdi)
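# Same butterfly network, applied to the next 64-byte block
# (bytes 256..319 of input_0).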
# qhasm: r0 = mem64[ input_0 + 256 ] x2
movddup 256(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 264 ] x2
movddup 264(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 272 ] x2
movddup 272(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 280 ] x2
movddup 280(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 288 ] x2
movddup 288(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 296 ] x2
movddup 296(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 304 ] x2
movddup 304(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 312 ] x2
movddup 312(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = r4 & mask0
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r0 & mask1
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm6
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = r5 & mask0
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r1 & mask1
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm7
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = r6 & mask0
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r2 & mask1
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm8
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = r7 & mask0
vpand %xmm0,%xmm13,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = r3 & mask1
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1,%xmm13,%xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm9
# qhasm: r3 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
vpand %xmm2,%xmm14,%xmm13
# qhasm: v10 = r2 & mask2
vpand %xmm2,%xmm11,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r0 & mask3
vpand %xmm3,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = r3 & mask2
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r1 & mask3
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm10
# qhasm: r1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = r6 & mask2
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r4 & mask3
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm6
# qhasm: r4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = r7 & mask2
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = r5 & mask3
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm7
# qhasm: r5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = r1 & mask4
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r0 & mask5
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm13
# qhasm: r0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = r3 & mask4
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = r2 & mask5
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand
<mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 256 ] = t0 # asm 1: movdqu <t0=reg128#8,256(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,256(<input_0=%rdi) movdqu % xmm7, 256( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 272 ] = t0 # asm 1: movdqu <t0=reg128#8,272(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,272(<input_0=%rdi) movdqu % xmm7, 272( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 288 ] = t0 # asm 1: movdqu <t0=reg128#8,288(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,288(<input_0=%rdi) movdqu % xmm7, 288( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 304 ] = t0 
# asm 1: movdqu <t0=reg128#7,304(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,304(<input_0=%rdi) movdqu % xmm6, 304( % rdi) # qhasm: r0 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 320(<input_0=%rdi),>r0=%xmm6 movddup 320( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 328(<input_0=%rdi),>r1=%xmm7 movddup 328( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 336(<input_0=%rdi),>r2=%xmm8 movddup 336( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 344(<input_0=%rdi),>r3=%xmm9 movddup 344( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 352(<input_0=%rdi),>r4=%xmm10 movddup 352( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 360 ] x2 # asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11 movddup 360( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 368 ] x2 # asm 1: movddup 368(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 368(<input_0=%rdi),>r6=%xmm12 movddup 368( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 376 ] x2 # asm 1: movddup 376(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 376(<input_0=%rdi),>r7=%xmm13 movddup 376( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 
psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # 
asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 320 ] = t0 # asm 1: movdqu <t0=reg128#8,320(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,320(<input_0=%rdi) movdqu % xmm7, 320( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 336 ] = t0 # asm 1: movdqu <t0=reg128#8,336(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,336(<input_0=%rdi) movdqu % xmm7, 336( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 352 ] = t0 # asm 1: movdqu <t0=reg128#8,352(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,352(<input_0=%rdi) movdqu % xmm7, 352( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 368 ] = t0 # asm 1: movdqu <t0=reg128#7,368(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,368(<input_0=%rdi) movdqu % xmm6, 368( % rdi) # qhasm: r0 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 384(<input_0=%rdi),>r0=%xmm6 movddup 384( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 392(<input_0=%rdi),>r1=%xmm7 movddup 392( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 400(<input_0=%rdi),>r2=%xmm8 movddup 400( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 
408(<input_0=%rdi),>r3=%xmm9 movddup 408( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 416(<input_0=%rdi),>r4=%xmm10 movddup 416( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 424 ] x2 # asm 1: movddup 424(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 424(<input_0=%rdi),>r5=%xmm11 movddup 424( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 432 ] x2 # asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12 movddup 432( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 440 ] x2 # asm 1: movddup 440(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 440(<input_0=%rdi),>r7=%xmm13 movddup 440( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # 
qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # 
asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq 
$1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % 
xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 384 ] = t0 # asm 1: movdqu <t0=reg128#8,384(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,384(<input_0=%rdi) movdqu % xmm7, 384( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 400 ] = t0 # asm 1: movdqu <t0=reg128#8,400(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,400(<input_0=%rdi) movdqu % xmm7, 400( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 416 ] = t0 # asm 1: movdqu <t0=reg128#8,416(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,416(<input_0=%rdi) movdqu % xmm7, 416( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 432 ] = t0 # asm 1: movdqu <t0=reg128#7,432(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,432(<input_0=%rdi) movdqu % xmm6, 432( % rdi) # qhasm: r0 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 448(<input_0=%rdi),>r0=%xmm6 movddup 448( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 456(<input_0=%rdi),>r1=%xmm7 movddup 456( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 464(<input_0=%rdi),>r2=%xmm8 movddup 464( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 472(<input_0=%rdi),>r3=%xmm9 movddup 472( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 480(<input_0=%rdi),>r4=%xmm10 movddup 480( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 488 ] x2 # asm 1: movddup 488(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 488(<input_0=%rdi),>r5=%xmm11 movddup 488( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 496 ] x2 # asm 1: movddup 496(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 496(<input_0=%rdi),>r6=%xmm12 movddup 496( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 504 ] x2 # asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13 movddup 504( % rdi), % xmm13 # 
qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % 
xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>r3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>r3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor 
<v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<r7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>r5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>r5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<r1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<r0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % 
xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<r3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<r5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<r4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<r7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>r6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>r6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % 
xmm1 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#8,<r0=reg128#4,>t0=reg128#4 # asm 2: vpunpcklqdq <r1=%xmm7,<r0=%xmm3,>t0=%xmm3 vpunpcklqdq % xmm7, % xmm3, % xmm3 # qhasm: mem128[ input_0 + 448 ] = t0 # asm 1: movdqu <t0=reg128#4,448(<input_0=int64#1) # asm 2: movdqu <t0=%xmm3,448(<input_0=%rdi) movdqu % xmm3, 448( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#1,<r2=reg128#9,>t0=reg128#1 # asm 2: vpunpcklqdq <r3=%xmm0,<r2=%xmm8,>t0=%xmm0 vpunpcklqdq % xmm0, % xmm8, % xmm0 # qhasm: mem128[ input_0 + 464 ] = t0 # asm 1: movdqu <t0=reg128#1,464(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,464(<input_0=%rdi) movdqu % xmm0, 464( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#3,<r4=reg128#10,>t0=reg128#1 # asm 2: vpunpcklqdq <r5=%xmm2,<r4=%xmm9,>t0=%xmm0 vpunpcklqdq % xmm2, % xmm9, % xmm0 # qhasm: mem128[ input_0 + 480 ] = t0 # asm 1: movdqu <t0=reg128#1,480(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,480(<input_0=%rdi) movdqu % xmm0, 480( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#2,<r6=reg128#5,>t0=reg128#1 # asm 2: vpunpcklqdq <r7=%xmm1,<r6=%xmm4,>t0=%xmm0 vpunpcklqdq % xmm1, % xmm4, % xmm0 # qhasm: mem128[ input_0 + 496 ] = t0 # asm 1: movdqu <t0=reg128#1,496(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,496(<input_0=%rdi) movdqu % xmm0, 496( % rdi) # qhasm: return add % r11, % rsp ret
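The store sequence above finishes a bitsliced transpose: each vpand/psllq/psrlq/vpor group is one mask-shift-or "butterfly" that swaps a block of bits between two registers, and the vpunpcklqdq/movdqu pairs pack the finished slices out to offsets 448-496. A minimal C sketch of one butterfly on a pair of 64-bit words, assuming mask_lo is one of the MASKk_0 constants and s the matching shift count (the function and parameter names are illustrative, not from the source):

#include <stdint.h>

/* One mask/shift/OR butterfly, mirroring the v00/v10/v01/v11 pattern
 * above: low-mask bits of y move up into x, high-mask bits of x move
 * down into y; mask_hi is always the complement of mask_lo. */
static void butterfly(uint64_t *x, uint64_t *y, uint64_t mask_lo, int s)
{
    uint64_t mask_hi = ~mask_lo;
    uint64_t v00 = *x & mask_lo;          /* vpand r, mask_lo   */
    uint64_t v10 = (*y & mask_lo) << s;   /* vpand + psllq $s   */
    uint64_t v01 = (*x & mask_hi) >> s;   /* vpand + psrlq $s   */
    uint64_t v11 = *y & mask_hi;
    *x = v00 | v10;                       /* vpor               */
    *y = v01 | v11;                       /* vpor               */
}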
mktmansour/MKT-KSA-Geolocation-Security
2712
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896f/avx2/consts.S
#include "namespace.h" #if defined(__APPLE__) #define ASM_HIDDEN .private_extern #else #define ASM_HIDDEN .hidden #endif #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) .data ASM_HIDDEN MASK0_0 ASM_HIDDEN MASK0_1 ASM_HIDDEN MASK1_0 ASM_HIDDEN MASK1_1 ASM_HIDDEN MASK2_0 ASM_HIDDEN MASK2_1 ASM_HIDDEN MASK3_0 ASM_HIDDEN MASK3_1 ASM_HIDDEN MASK4_0 ASM_HIDDEN MASK4_1 ASM_HIDDEN MASK5_0 ASM_HIDDEN MASK5_1 .globl MASK0_0 .globl MASK0_1 .globl MASK1_0 .globl MASK1_1 .globl MASK2_0 .globl MASK2_1 .globl MASK3_0 .globl MASK3_1 .globl MASK4_0 .globl MASK4_1 .globl MASK5_0 .globl MASK5_1 .p2align 5 MASK0_0: .quad 0x5555555555555555, 0x5555555555555555, 0x5555555555555555, 0x5555555555555555 MASK0_1: .quad 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA MASK1_0: .quad 0x3333333333333333, 0x3333333333333333, 0x3333333333333333, 0x3333333333333333 MASK1_1: .quad 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC MASK2_0: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F MASK2_1: .quad 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0 MASK3_0: .quad 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF MASK3_1: .quad 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00 MASK4_0: .quad 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF MASK4_1: .quad 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000 MASK5_0: .quad 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF MASK5_1: .quad 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000
mktmansour/MKT-KSA-Geolocation-Security
14915
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896f/avx2/update_asm.S
#include "namespace.h" #define update_asm CRYPTO_NAMESPACE(update_asm) #define _update_asm _CRYPTO_NAMESPACE(update_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 s0 # qhasm: int64 s1 # qhasm: int64 s2 # qhasm: enter update_asm .p2align 5 .global _update_asm .global update_asm _update_asm: update_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: s2 = input_1 # asm 1: mov <input_1=int64#2,>s2=int64#2 # asm 2: mov <input_1=%rsi,>s2=%rsi mov % rsi, % rsi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ 
input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 
0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd 
$1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq 
<s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: return add % r11, % rsp ret
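update_asm for mceliece460896f shifts an array of two-word (128-bit) rows right by one bit, stepping through the rows at a caller-supplied byte stride: each shrd $1 pulls its fill bit from the next-higher word, and the leftover bits of input_1 carry from row to row, so bit i of input_1 becomes the new top bit of row i. A C sketch of the same update (buf/in/stride/nrows are illustrative names; the row count is hard-coded by the unrolling in the assembly):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Each row loses its bottom bit; one bit of `in` is shifted into the
 * top of each row in turn. memcpy stands in for the movq loads/stores
 * and avoids alignment assumptions. */
static void update_rows(unsigned char *buf, uint64_t in,
                        size_t stride, size_t nrows)
{
    for (size_t i = 0; i < nrows; i++) {
        uint64_t s0, s1;
        memcpy(&s0, buf + i * stride, 8);
        memcpy(&s1, buf + i * stride + 8, 8);
        uint64_t r0 = (s0 >> 1) | (s1 << 63); /* shrd $1, s1, s0 */
        uint64_t r1 = (s1 >> 1) | (in << 63); /* shrd $1, s2, s1 */
        in >>= 1;                             /* shr  $1, s2     */
        memcpy(buf + i * stride, &r0, 8);
        memcpy(buf + i * stride + 8, &r1, 8);
    }
}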
mktmansour/MKT-KSA-Geolocation-Security
53565
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896f/avx2/vec128_mul_asm.S
#include "namespace.h" #define vec128_mul_asm CRYPTO_NAMESPACE(vec128_mul_asm) #define _vec128_mul_asm _CRYPTO_NAMESPACE(vec128_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 b6 # qhasm: reg256 b7 # qhasm: reg256 b8 # qhasm: reg256 b9 # qhasm: reg256 b10 # qhasm: reg256 b11 # qhasm: reg256 b12 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: reg128 h0 # qhasm: reg128 h1 # qhasm: reg128 h2 # qhasm: reg128 h3 # qhasm: reg128 h4 # qhasm: reg128 h5 # qhasm: reg128 h6 # qhasm: reg128 h7 # qhasm: reg128 h8 # qhasm: reg128 h9 # qhasm: reg128 h10 # qhasm: reg128 h11 # qhasm: reg128 h12 # qhasm: reg128 h13 # qhasm: reg128 h14 # qhasm: reg128 h15 # qhasm: reg128 h16 # qhasm: reg128 h17 # qhasm: reg128 h18 # qhasm: reg128 h19 # qhasm: reg128 h20 # qhasm: reg128 h21 # qhasm: reg128 h22 # qhasm: reg128 h23 # qhasm: reg128 h24 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: enter vec128_mul_asm .p2align 5 .global _vec128_mul_asm .global vec128_mul_asm _vec128_mul_asm: vec128_mul_asm: mov % rsp, % r11 and $31, % r11 add $608, % r11 sub % r11, % rsp # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#5 # asm 2: leaq <buf=0(%rsp),>ptr=%r8 leaq 0( % rsp), % r8 # qhasm: tmp = input_3 # asm 1: mov <input_3=int64#4,>tmp=int64#6 # asm 2: mov <input_3=%rcx,>tmp=%r9 mov % rcx, % r9 # qhasm: tmp *= 12 # asm 1: imulq $12,<tmp=int64#6,>tmp=int64#6 # asm 2: imulq $12,<tmp=%r9,>tmp=%r9 imulq $12, % r9, % r9 # qhasm: input_2 += tmp # asm 1: add <tmp=int64#6,<input_2=int64#3 # asm 2: add <tmp=%r9,<input_2=%rdx add % r9, % rdx # qhasm: b12 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b12=reg256#1 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b12=%ymm0 vbroadcasti128 0( % rdx), % ymm0 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: a6 = a6 ^ a6 # asm 1: vpxor <a6=reg256#2,<a6=reg256#2,>a6=reg256#2 # asm 2: vpxor <a6=%ymm1,<a6=%ymm1,>a6=%ymm1 vpxor % ymm1, % ymm1, % ymm1 # qhasm: a6[0] = mem128[ input_1 + 96 ] # asm 1: vinsertf128 $0x0,96(<input_1=int64#2),<a6=reg256#2,<a6=reg256#2 # asm 2: vinsertf128 $0x0,96(<input_1=%rsi),<a6=%ymm1,<a6=%ymm1 vinsertf128 $0x0, 96( % rsi), % ymm1, % ymm1 # qhasm: r18 = b12 & a6 # asm 1: vpand <b12=reg256#1,<a6=reg256#2,>r18=reg256#3 # asm 2: vpand <b12=%ymm0,<a6=%ymm1,>r18=%ymm2 vpand % ymm0, % ymm1, % ymm2 # qhasm: mem256[ ptr + 576 ] = r18 # asm 1: vmovupd <r18=reg256#3,576(<ptr=int64#5) # asm 2: 
vmovupd <r18=%ymm2,576(<ptr=%r8) vmovupd % ymm2, 576( % r8) # qhasm: a5[0] = mem128[ input_1 + 80 ] # asm 1: vinsertf128 $0x0,80(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x0,80(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x0, 80( % rsi), % ymm2, % ymm2 # qhasm: a5[1] = mem128[ input_1 + 192 ] # asm 1: vinsertf128 $0x1,192(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x1,192(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x1, 192( % rsi), % ymm2, % ymm2 # qhasm: r17 = b12 & a5 # asm 1: vpand <b12=reg256#1,<a5=reg256#3,>r17=reg256#4 # asm 2: vpand <b12=%ymm0,<a5=%ymm2,>r17=%ymm3 vpand % ymm0, % ymm2, % ymm3 # qhasm: a4[0] = mem128[ input_1 + 64 ] # asm 1: vinsertf128 $0x0,64(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x0,64(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x0, 64( % rsi), % ymm4, % ymm4 # qhasm: a4[1] = mem128[ input_1 + 176 ] # asm 1: vinsertf128 $0x1,176(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x1,176(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x1, 176( % rsi), % ymm4, % ymm4 # qhasm: r16 = b12 & a4 # asm 1: vpand <b12=reg256#1,<a4=reg256#5,>r16=reg256#6 # asm 2: vpand <b12=%ymm0,<a4=%ymm4,>r16=%ymm5 vpand % ymm0, % ymm4, % ymm5 # qhasm: a3[0] = mem128[ input_1 + 48 ] # asm 1: vinsertf128 $0x0,48(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x0,48(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x0, 48( % rsi), % ymm6, % ymm6 # qhasm: a3[1] = mem128[ input_1 + 160 ] # asm 1: vinsertf128 $0x1,160(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x1,160(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x1, 160( % rsi), % ymm6, % ymm6 # qhasm: r15 = b12 & a3 # asm 1: vpand <b12=reg256#1,<a3=reg256#7,>r15=reg256#8 # asm 2: vpand <b12=%ymm0,<a3=%ymm6,>r15=%ymm7 vpand % ymm0, % ymm6, % ymm7 # qhasm: a2[0] = mem128[ input_1 + 32 ] # asm 1: vinsertf128 $0x0,32(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x0,32(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x0, 32( % rsi), % ymm8, % ymm8 # qhasm: a2[1] = mem128[ input_1 + 144 ] # asm 1: vinsertf128 $0x1,144(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x1,144(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x1, 144( % rsi), % ymm8, % ymm8 # qhasm: r14 = b12 & a2 # asm 1: vpand <b12=reg256#1,<a2=reg256#9,>r14=reg256#10 # asm 2: vpand <b12=%ymm0,<a2=%ymm8,>r14=%ymm9 vpand % ymm0, % ymm8, % ymm9 # qhasm: a1[0] = mem128[ input_1 + 16 ] # asm 1: vinsertf128 $0x0,16(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x0,16(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x0, 16( % rsi), % ymm10, % ymm10 # qhasm: a1[1] = mem128[ input_1 + 128 ] # asm 1: vinsertf128 $0x1,128(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x1,128(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x1, 128( % rsi), % ymm10, % ymm10 # qhasm: r13 = b12 & a1 # asm 1: vpand <b12=reg256#1,<a1=reg256#11,>r13=reg256#12 # asm 2: vpand <b12=%ymm0,<a1=%ymm10,>r13=%ymm11 vpand % ymm0, % ymm10, % ymm11 # qhasm: a0[0] = mem128[ input_1 + 0 ] # asm 1: vinsertf128 $0x0,0(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x0,0(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x0, 0( % rsi), % ymm12, % ymm12 # qhasm: a0[1] = mem128[ input_1 + 112 ] # asm 1: vinsertf128 $0x1,112(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x1,112(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x1, 112( % rsi), % ymm12, % ymm12 
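# note (added commentary, not in the qhasm source): input_2 starts at b[12]
# and steps down by input_3 bytes per iteration, broadcasting each 128-bit
# b[i] into both ymm lanes (vbroadcasti128); each a register pairs a[j] in
# lane 0 with a[j+7] in lane 1 (vinsertf128 $0x0 / $0x1, with a6's high
# lane zeroed), so every vpand/vpxor below advances two schoolbook columns
# of the product at once.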
# qhasm: r12 = b12 & a0 # asm 1: vpand <b12=reg256#1,<a0=reg256#13,>r12=reg256#1 # asm 2: vpand <b12=%ymm0,<a0=%ymm12,>r12=%ymm0 vpand % ymm0, % ymm12, % ymm0 # qhasm: b11 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b11=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b11=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b11 & a6 # asm 1: vpand <b11=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b11=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#4,<r17=reg256#4 # asm 2: vpxor <r=%ymm14,<r17=%ymm3,<r17=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 544 ] = r17 # asm 1: vmovupd <r17=reg256#4,544(<ptr=int64#5) # asm 2: vmovupd <r17=%ymm3,544(<ptr=%r8) vmovupd % ymm3, 544( % r8) # qhasm: r = b11 & a5 # asm 1: vpand <b11=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#4,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm3,<r16=%ymm5,<r16=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b11 & a4 # asm 1: vpand <b11=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#4,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm3,<r15=%ymm7,<r15=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b11 & a3 # asm 1: vpand <b11=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#4,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm3,<r14=%ymm9,<r14=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b11 & a2 # asm 1: vpand <b11=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#4,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm3,<r13=%ymm11,<r13=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b11 & a1 # asm 1: vpand <b11=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#4,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm3,<r12=%ymm0,<r12=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r11 = b11 & a0 # asm 1: vpand <b11=reg256#14,<a0=reg256#13,>r11=reg256#4 # asm 2: vpand <b11=%ymm13,<a0=%ymm12,>r11=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b10 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b10=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b10=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b10 & a6 # asm 1: vpand <b10=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b10=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm14,<r16=%ymm5,<r16=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 512 ] = r16 # asm 1: vmovupd <r16=reg256#6,512(<ptr=int64#5) # asm 2: vmovupd <r16=%ymm5,512(<ptr=%r8) vmovupd % ymm5, 512( % r8) # qhasm: r = b10 & a5 # asm 1: vpand <b10=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # 
qhasm: r15 ^= r # asm 1: vpxor <r=reg256#6,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm5,<r15=%ymm7,<r15=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b10 & a4 # asm 1: vpand <b10=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#6,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm5,<r14=%ymm9,<r14=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b10 & a3 # asm 1: vpand <b10=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#6,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm5,<r13=%ymm11,<r13=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b10 & a2 # asm 1: vpand <b10=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#6,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm5,<r12=%ymm0,<r12=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b10 & a1 # asm 1: vpand <b10=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#6,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm5,<r11=%ymm3,<r11=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r10 = b10 & a0 # asm 1: vpand <b10=reg256#14,<a0=reg256#13,>r10=reg256#6 # asm 2: vpand <b10=%ymm13,<a0=%ymm12,>r10=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b9 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b9=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b9=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b9 & a6 # asm 1: vpand <b9=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b9=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm14,<r15=%ymm7,<r15=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 480 ] = r15 # asm 1: vmovupd <r15=reg256#8,480(<ptr=int64#5) # asm 2: vmovupd <r15=%ymm7,480(<ptr=%r8) vmovupd % ymm7, 480( % r8) # qhasm: r = b9 & a5 # asm 1: vpand <b9=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#8,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm7,<r14=%ymm9,<r14=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b9 & a4 # asm 1: vpand <b9=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#8,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm7,<r13=%ymm11,<r13=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b9 & a3 # asm 1: vpand <b9=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#8,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm7,<r12=%ymm0,<r12=%ymm0 vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b9 & a2 # asm 1: vpand <b9=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#8,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm7,<r11=%ymm3,<r11=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b9 & a1 # asm 1: vpand 
<b9=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#8,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm7,<r10=%ymm5,<r10=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r9 = b9 & a0 # asm 1: vpand <b9=reg256#14,<a0=reg256#13,>r9=reg256#8 # asm 2: vpand <b9=%ymm13,<a0=%ymm12,>r9=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b8 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b8=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b8=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b8 & a6 # asm 1: vpand <b8=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b8=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm14,<r14=%ymm9,<r14=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 448 ] = r14 # asm 1: vmovupd <r14=reg256#10,448(<ptr=int64#5) # asm 2: vmovupd <r14=%ymm9,448(<ptr=%r8) vmovupd % ymm9, 448( % r8) # qhasm: r = b8 & a5 # asm 1: vpand <b8=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#10,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm9,<r13=%ymm11,<r13=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b8 & a4 # asm 1: vpand <b8=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#10,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm9,<r12=%ymm0,<r12=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b8 & a3 # asm 1: vpand <b8=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#10,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm9,<r11=%ymm3,<r11=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b8 & a2 # asm 1: vpand <b8=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#10,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm9,<r10=%ymm5,<r10=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b8 & a1 # asm 1: vpand <b8=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#10,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm9,<r9=%ymm7,<r9=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r8 = b8 & a0 # asm 1: vpand <b8=reg256#14,<a0=reg256#13,>r8=reg256#10 # asm 2: vpand <b8=%ymm13,<a0=%ymm12,>r8=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b7 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b7=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b7=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b7 & a6 # asm 1: vpand <b7=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b7=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm14,<r13=%ymm11,<r13=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 416 ] = r13 # asm 1: vmovupd 
<r13=reg256#12,416(<ptr=int64#5) # asm 2: vmovupd <r13=%ymm11,416(<ptr=%r8) vmovupd % ymm11, 416( % r8) # qhasm: r = b7 & a5 # asm 1: vpand <b7=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#12,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm11,<r12=%ymm0,<r12=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b7 & a4 # asm 1: vpand <b7=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#12,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm11,<r11=%ymm3,<r11=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b7 & a3 # asm 1: vpand <b7=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#12,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm11,<r10=%ymm5,<r10=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b7 & a2 # asm 1: vpand <b7=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#12,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm11,<r9=%ymm7,<r9=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b7 & a1 # asm 1: vpand <b7=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#12,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm11,<r8=%ymm9,<r8=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r7 = b7 & a0 # asm 1: vpand <b7=reg256#14,<a0=reg256#13,>r7=reg256#12 # asm 2: vpand <b7=%ymm13,<a0=%ymm12,>r7=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b6 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b6=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b6=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b6 & a6 # asm 1: vpand <b6=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b6=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm14,<r12=%ymm0,<r12=%ymm0 vpxor % ymm14, % ymm0, % ymm0 # qhasm: mem256[ ptr + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<ptr=int64#5) # asm 2: vmovupd <r12=%ymm0,384(<ptr=%r8) vmovupd % ymm0, 384( % r8) # qhasm: r = b6 & a5 # asm 1: vpand <b6=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm0,<r11=%ymm3,<r11=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b6 & a4 # asm 1: vpand <b6=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm0,<r10=%ymm5,<r10=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b6 & a3 # asm 1: vpand <b6=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a3=%ymm6,>r=%ymm0 vpand % ymm13, % ymm6, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm0,<r9=%ymm7,<r9=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b6 & a2 # asm 1: vpand <b6=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand 
<b6=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm0,<r8=%ymm9,<r8=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b6 & a1 # asm 1: vpand <b6=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm0,<r7=%ymm11,<r7=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r6 = b6 & a0 # asm 1: vpand <b6=reg256#14,<a0=reg256#13,>r6=reg256#1 # asm 2: vpand <b6=%ymm13,<a0=%ymm12,>r6=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: b5 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b5=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b5=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b5 & a6 # asm 1: vpand <b5=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b5=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm14,<r11=%ymm3,<r11=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 352 ] = r11 # asm 1: vmovupd <r11=reg256#4,352(<ptr=int64#5) # asm 2: vmovupd <r11=%ymm3,352(<ptr=%r8) vmovupd % ymm3, 352( % r8) # qhasm: r = b5 & a5 # asm 1: vpand <b5=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#4,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm3,<r10=%ymm5,<r10=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b5 & a4 # asm 1: vpand <b5=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#4,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm3,<r9=%ymm7,<r9=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b5 & a3 # asm 1: vpand <b5=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#4,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm3,<r8=%ymm9,<r8=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b5 & a2 # asm 1: vpand <b5=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#4,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm3,<r7=%ymm11,<r7=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b5 & a1 # asm 1: vpand <b5=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#4,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm3,<r6=%ymm0,<r6=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r5 = b5 & a0 # asm 1: vpand <b5=reg256#14,<a0=reg256#13,>r5=reg256#4 # asm 2: vpand <b5=%ymm13,<a0=%ymm12,>r5=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b4 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b4=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b4=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b4 & a6 # asm 1: vpand <b4=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b4=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: 
r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm14,<r10=%ymm5,<r10=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#6,320(<ptr=int64#5) # asm 2: vmovupd <r10=%ymm5,320(<ptr=%r8) vmovupd % ymm5, 320( % r8) # qhasm: r = b4 & a5 # asm 1: vpand <b4=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#6,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm5,<r9=%ymm7,<r9=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b4 & a4 # asm 1: vpand <b4=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#6,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm5,<r8=%ymm9,<r8=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b4 & a3 # asm 1: vpand <b4=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#6,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm5,<r7=%ymm11,<r7=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b4 & a2 # asm 1: vpand <b4=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#6,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm5,<r6=%ymm0,<r6=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b4 & a1 # asm 1: vpand <b4=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#6,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm5,<r5=%ymm3,<r5=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r4 = b4 & a0 # asm 1: vpand <b4=reg256#14,<a0=reg256#13,>r4=reg256#6 # asm 2: vpand <b4=%ymm13,<a0=%ymm12,>r4=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b3 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b3=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b3=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b3 & a6 # asm 1: vpand <b3=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b3=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm14,<r9=%ymm7,<r9=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#8,288(<ptr=int64#5) # asm 2: vmovupd <r9=%ymm7,288(<ptr=%r8) vmovupd % ymm7, 288( % r8) # qhasm: r = b3 & a5 # asm 1: vpand <b3=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#8,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm7,<r8=%ymm9,<r8=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b3 & a4 # asm 1: vpand <b3=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#8,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm7,<r7=%ymm11,<r7=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b3 & a3 # asm 1: vpand <b3=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#8,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm7,<r6=%ymm0,<r6=%ymm0 
vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b3 & a2 # asm 1: vpand <b3=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#8,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm7,<r5=%ymm3,<r5=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b3 & a1 # asm 1: vpand <b3=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#8,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm7,<r4=%ymm5,<r4=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r3 = b3 & a0 # asm 1: vpand <b3=reg256#14,<a0=reg256#13,>r3=reg256#8 # asm 2: vpand <b3=%ymm13,<a0=%ymm12,>r3=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b2 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b2=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b2=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b2 & a6 # asm 1: vpand <b2=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b2=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm14,<r8=%ymm9,<r8=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#10,256(<ptr=int64#5) # asm 2: vmovupd <r8=%ymm9,256(<ptr=%r8) vmovupd % ymm9, 256( % r8) # qhasm: r = b2 & a5 # asm 1: vpand <b2=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#10,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm9,<r7=%ymm11,<r7=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b2 & a4 # asm 1: vpand <b2=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#10,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm9,<r6=%ymm0,<r6=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b2 & a3 # asm 1: vpand <b2=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#10,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm9,<r5=%ymm3,<r5=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b2 & a2 # asm 1: vpand <b2=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#10,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm9,<r4=%ymm5,<r4=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b2 & a1 # asm 1: vpand <b2=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#10,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm9,<r3=%ymm7,<r3=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r2 = b2 & a0 # asm 1: vpand <b2=reg256#14,<a0=reg256#13,>r2=reg256#10 # asm 2: vpand <b2=%ymm13,<a0=%ymm12,>r2=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b1 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b1=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b1=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b1 & a6 # asm 1: vpand 
<b1=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b1=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm14,<r7=%ymm11,<r7=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#12,224(<ptr=int64#5) # asm 2: vmovupd <r7=%ymm11,224(<ptr=%r8) vmovupd % ymm11, 224( % r8) # qhasm: r = b1 & a5 # asm 1: vpand <b1=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#12,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm11,<r6=%ymm0,<r6=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b1 & a4 # asm 1: vpand <b1=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#12,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm11,<r5=%ymm3,<r5=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b1 & a3 # asm 1: vpand <b1=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#12,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm11,<r4=%ymm5,<r4=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b1 & a2 # asm 1: vpand <b1=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#12,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm11,<r3=%ymm7,<r3=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b1 & a1 # asm 1: vpand <b1=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#12,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm11,<r2=%ymm9,<r2=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r1 = b1 & a0 # asm 1: vpand <b1=reg256#14,<a0=reg256#13,>r1=reg256#12 # asm 2: vpand <b1=%ymm13,<a0=%ymm12,>r1=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b0 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b0=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b0=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b0 & a6 # asm 1: vpand <b0=reg256#14,<a6=reg256#2,>r=reg256#2 # asm 2: vpand <b0=%ymm13,<a6=%ymm1,>r=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#2,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm1,<r6=%ymm0,<r6=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<ptr=int64#5) # asm 2: vmovupd <r6=%ymm0,192(<ptr=%r8) vmovupd % ymm0, 192( % r8) # qhasm: r = b0 & a5 # asm 1: vpand <b0=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm0,<r5=%ymm3,<r5=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b0 & a4 # asm 1: vpand <b0=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm0,<r4=%ymm5,<r4=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b0 & a3 # asm 1: vpand <b0=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a3=%ymm6,>r=%ymm0 vpand 
% ymm13, % ymm6, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm0,<r3=%ymm7,<r3=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b0 & a2 # asm 1: vpand <b0=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm0,<r2=%ymm9,<r2=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b0 & a1 # asm 1: vpand <b0=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#12,<r1=reg256#12 # asm 2: vpxor <r=%ymm0,<r1=%ymm11,<r1=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r0 = b0 & a0 # asm 1: vpand <b0=reg256#14,<a0=reg256#13,>r0=reg256#1 # asm 2: vpand <b0=%ymm13,<a0=%ymm12,>r0=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#5) # asm 2: vmovupd <r5=%ymm3,160(<ptr=%r8) vmovupd % ymm3, 160( % r8) # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#6,128(<ptr=int64#5) # asm 2: vmovupd <r4=%ymm5,128(<ptr=%r8) vmovupd % ymm5, 128( % r8) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#8,96(<ptr=int64#5) # asm 2: vmovupd <r3=%ymm7,96(<ptr=%r8) vmovupd % ymm7, 96( % r8) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#10,64(<ptr=int64#5) # asm 2: vmovupd <r2=%ymm9,64(<ptr=%r8) vmovupd % ymm9, 64( % r8) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#12,32(<ptr=int64#5) # asm 2: vmovupd <r1=%ymm11,32(<ptr=%r8) vmovupd % ymm11, 32( % r8) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#5) # asm 2: vmovupd <r0=%ymm0,0(<ptr=%r8) vmovupd % ymm0, 0( % r8) # qhasm: vzeroupper vzeroupper # qhasm: h24 = mem128[ ptr + 560 ] # asm 1: movdqu 560(<ptr=int64#5),>h24=reg128#1 # asm 2: movdqu 560(<ptr=%r8),>h24=%xmm0 movdqu 560( % r8), % xmm0 # qhasm: h11 = h24 # asm 1: movdqa <h24=reg128#1,>h11=reg128#2 # asm 2: movdqa <h24=%xmm0,>h11=%xmm1 movdqa % xmm0, % xmm1 # qhasm: h12 = h24 # asm 1: movdqa <h24=reg128#1,>h12=reg128#3 # asm 2: movdqa <h24=%xmm0,>h12=%xmm2 movdqa % xmm0, % xmm2 # qhasm: h14 = h24 # asm 1: movdqa <h24=reg128#1,>h14=reg128#4 # asm 2: movdqa <h24=%xmm0,>h14=%xmm3 movdqa % xmm0, % xmm3 # qhasm: h15 = h24 # asm 1: movdqa <h24=reg128#1,>h15=reg128#1 # asm 2: movdqa <h24=%xmm0,>h15=%xmm0 movdqa % xmm0, % xmm0 # qhasm: h23 = mem128[ ptr + 528 ] # asm 1: movdqu 528(<ptr=int64#5),>h23=reg128#5 # asm 2: movdqu 528(<ptr=%r8),>h23=%xmm4 movdqu 528( % r8), % xmm4 # qhasm: h10 = h23 # asm 1: movdqa <h23=reg128#5,>h10=reg128#6 # asm 2: movdqa <h23=%xmm4,>h10=%xmm5 movdqa % xmm4, % xmm5 # qhasm: h11 = h11 ^ h23 # asm 1: vpxor <h23=reg128#5,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h23=%xmm4,<h11=%xmm1,>h11=%xmm1 vpxor % xmm4, % xmm1, % xmm1 # qhasm: h13 = h23 # asm 1: movdqa <h23=reg128#5,>h13=reg128#7 # asm 2: movdqa <h23=%xmm4,>h13=%xmm6 movdqa % xmm4, % xmm6 # qhasm: h14 = h14 ^ h23 # asm 1: vpxor <h23=reg128#5,<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor <h23=%xmm4,<h14=%xmm3,>h14=%xmm3 vpxor % xmm4, % xmm3, % xmm3 # qhasm: h22 = mem128[ ptr + 496 ] # asm 1: movdqu 496(<ptr=int64#5),>h22=reg128#5 # asm 2: movdqu 496(<ptr=%r8),>h22=%xmm4 movdqu 496( % r8), % xmm4 # qhasm: h9 = h22 # asm 1: movdqa <h22=reg128#5,>h9=reg128#8 # asm 2: movdqa <h22=%xmm4,>h9=%xmm7 movdqa % xmm4, % xmm7 # qhasm: h10 = h10 ^ h22 # asm 1: vpxor <h22=reg128#5,<h10=reg128#6,>h10=reg128#6 # asm 2: 
vpxor <h22=%xmm4,<h10=%xmm5,>h10=%xmm5 vpxor % xmm4, % xmm5, % xmm5 # qhasm: h12 = h12 ^ h22 # asm 1: vpxor <h22=reg128#5,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h22=%xmm4,<h12=%xmm2,>h12=%xmm2 vpxor % xmm4, % xmm2, % xmm2 # qhasm: h13 = h13 ^ h22 # asm 1: vpxor <h22=reg128#5,<h13=reg128#7,>h13=reg128#5 # asm 2: vpxor <h22=%xmm4,<h13=%xmm6,>h13=%xmm4 vpxor % xmm4, % xmm6, % xmm4 # qhasm: h21 = mem128[ ptr + 464 ] # asm 1: movdqu 464(<ptr=int64#5),>h21=reg128#7 # asm 2: movdqu 464(<ptr=%r8),>h21=%xmm6 movdqu 464( % r8), % xmm6 # qhasm: h8 = h21 # asm 1: movdqa <h21=reg128#7,>h8=reg128#9 # asm 2: movdqa <h21=%xmm6,>h8=%xmm8 movdqa % xmm6, % xmm8 # qhasm: h9 = h9 ^ h21 # asm 1: vpxor <h21=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h21=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h11 = h11 ^ h21 # asm 1: vpxor <h21=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h21=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h12 = h12 ^ h21 # asm 1: vpxor <h21=reg128#7,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h21=%xmm6,<h12=%xmm2,>h12=%xmm2 vpxor % xmm6, % xmm2, % xmm2 # qhasm: h20 = mem128[ ptr + 432 ] # asm 1: movdqu 432(<ptr=int64#5),>h20=reg128#7 # asm 2: movdqu 432(<ptr=%r8),>h20=%xmm6 movdqu 432( % r8), % xmm6 # qhasm: h7 = h20 # asm 1: movdqa <h20=reg128#7,>h7=reg128#10 # asm 2: movdqa <h20=%xmm6,>h7=%xmm9 movdqa % xmm6, % xmm9 # qhasm: h8 = h8 ^ h20 # asm 1: vpxor <h20=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h20=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h10 = h10 ^ h20 # asm 1: vpxor <h20=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h20=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h11 = h11 ^ h20 # asm 1: vpxor <h20=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h20=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h19 = mem128[ ptr + 400 ] # asm 1: movdqu 400(<ptr=int64#5),>h19=reg128#7 # asm 2: movdqu 400(<ptr=%r8),>h19=%xmm6 movdqu 400( % r8), % xmm6 # qhasm: h6 = h19 # asm 1: movdqa <h19=reg128#7,>h6=reg128#11 # asm 2: movdqa <h19=%xmm6,>h6=%xmm10 movdqa % xmm6, % xmm10 # qhasm: h7 = h7 ^ h19 # asm 1: vpxor <h19=reg128#7,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h19=%xmm6,<h7=%xmm9,>h7=%xmm9 vpxor % xmm6, % xmm9, % xmm9 # qhasm: h9 = h9 ^ h19 # asm 1: vpxor <h19=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h19=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h10 = h10 ^ h19 # asm 1: vpxor <h19=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h19=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h18 = mem128[ ptr + 368 ] # asm 1: movdqu 368(<ptr=int64#5),>h18=reg128#7 # asm 2: movdqu 368(<ptr=%r8),>h18=%xmm6 movdqu 368( % r8), % xmm6 # qhasm: h18 = h18 ^ mem128[ ptr + 576 ] # asm 1: vpxor 576(<ptr=int64#5),<h18=reg128#7,>h18=reg128#7 # asm 2: vpxor 576(<ptr=%r8),<h18=%xmm6,>h18=%xmm6 vpxor 576( % r8), % xmm6, % xmm6 # qhasm: h5 = h18 # asm 1: movdqa <h18=reg128#7,>h5=reg128#12 # asm 2: movdqa <h18=%xmm6,>h5=%xmm11 movdqa % xmm6, % xmm11 # qhasm: h6 = h6 ^ h18 # asm 1: vpxor <h18=reg128#7,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h18=%xmm6,<h6=%xmm10,>h6=%xmm10 vpxor % xmm6, % xmm10, % xmm10 # qhasm: h8 = h8 ^ h18 # asm 1: vpxor <h18=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h18=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h9 = h9 ^ h18 # asm 1: vpxor <h18=reg128#7,<h9=reg128#8,>h9=reg128#7 # asm 2: vpxor <h18=%xmm6,<h9=%xmm7,>h9=%xmm6 vpxor % xmm6, % xmm7, % xmm6 # qhasm: h17 = 
mem128[ ptr + 336 ] # asm 1: movdqu 336(<ptr=int64#5),>h17=reg128#8 # asm 2: movdqu 336(<ptr=%r8),>h17=%xmm7 movdqu 336( % r8), % xmm7 # qhasm: h17 = h17 ^ mem128[ ptr + 544 ] # asm 1: vpxor 544(<ptr=int64#5),<h17=reg128#8,>h17=reg128#8 # asm 2: vpxor 544(<ptr=%r8),<h17=%xmm7,>h17=%xmm7 vpxor 544( % r8), % xmm7, % xmm7 # qhasm: h4 = h17 # asm 1: movdqa <h17=reg128#8,>h4=reg128#13 # asm 2: movdqa <h17=%xmm7,>h4=%xmm12 movdqa % xmm7, % xmm12 # qhasm: h5 = h5 ^ h17 # asm 1: vpxor <h17=reg128#8,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h17=%xmm7,<h5=%xmm11,>h5=%xmm11 vpxor % xmm7, % xmm11, % xmm11 # qhasm: h7 = h7 ^ h17 # asm 1: vpxor <h17=reg128#8,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h17=%xmm7,<h7=%xmm9,>h7=%xmm9 vpxor % xmm7, % xmm9, % xmm9 # qhasm: h8 = h8 ^ h17 # asm 1: vpxor <h17=reg128#8,<h8=reg128#9,>h8=reg128#8 # asm 2: vpxor <h17=%xmm7,<h8=%xmm8,>h8=%xmm7 vpxor % xmm7, % xmm8, % xmm7 # qhasm: h16 = mem128[ ptr + 304 ] # asm 1: movdqu 304(<ptr=int64#5),>h16=reg128#9 # asm 2: movdqu 304(<ptr=%r8),>h16=%xmm8 movdqu 304( % r8), % xmm8 # qhasm: h16 = h16 ^ mem128[ ptr + 512 ] # asm 1: vpxor 512(<ptr=int64#5),<h16=reg128#9,>h16=reg128#9 # asm 2: vpxor 512(<ptr=%r8),<h16=%xmm8,>h16=%xmm8 vpxor 512( % r8), % xmm8, % xmm8 # qhasm: h3 = h16 # asm 1: movdqa <h16=reg128#9,>h3=reg128#14 # asm 2: movdqa <h16=%xmm8,>h3=%xmm13 movdqa % xmm8, % xmm13 # qhasm: h4 = h4 ^ h16 # asm 1: vpxor <h16=reg128#9,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor <h16=%xmm8,<h4=%xmm12,>h4=%xmm12 vpxor % xmm8, % xmm12, % xmm12 # qhasm: h6 = h6 ^ h16 # asm 1: vpxor <h16=reg128#9,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h16=%xmm8,<h6=%xmm10,>h6=%xmm10 vpxor % xmm8, % xmm10, % xmm10 # qhasm: h7 = h7 ^ h16 # asm 1: vpxor <h16=reg128#9,<h7=reg128#10,>h7=reg128#9 # asm 2: vpxor <h16=%xmm8,<h7=%xmm9,>h7=%xmm8 vpxor % xmm8, % xmm9, % xmm8 # qhasm: h15 = h15 ^ mem128[ ptr + 272 ] # asm 1: vpxor 272(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 272(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 272( % r8), % xmm0, % xmm0 # qhasm: h15 = h15 ^ mem128[ ptr + 480 ] # asm 1: vpxor 480(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 480(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 480( % r8), % xmm0, % xmm0 # qhasm: h2 = h15 # asm 1: movdqa <h15=reg128#1,>h2=reg128#10 # asm 2: movdqa <h15=%xmm0,>h2=%xmm9 movdqa % xmm0, % xmm9 # qhasm: h3 = h3 ^ h15 # asm 1: vpxor <h15=reg128#1,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h15=%xmm0,<h3=%xmm13,>h3=%xmm13 vpxor % xmm0, % xmm13, % xmm13 # qhasm: h5 = h5 ^ h15 # asm 1: vpxor <h15=reg128#1,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h15=%xmm0,<h5=%xmm11,>h5=%xmm11 vpxor % xmm0, % xmm11, % xmm11 # qhasm: h6 = h6 ^ h15 # asm 1: vpxor <h15=reg128#1,<h6=reg128#11,>h6=reg128#1 # asm 2: vpxor <h15=%xmm0,<h6=%xmm10,>h6=%xmm0 vpxor % xmm0, % xmm10, % xmm0 # qhasm: h14 = h14 ^ mem128[ ptr + 240 ] # asm 1: vpxor 240(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 240(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 240( % r8), % xmm3, % xmm3 # qhasm: h14 = h14 ^ mem128[ ptr + 448 ] # asm 1: vpxor 448(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 448(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 448( % r8), % xmm3, % xmm3 # qhasm: h1 = h14 # asm 1: movdqa <h14=reg128#4,>h1=reg128#11 # asm 2: movdqa <h14=%xmm3,>h1=%xmm10 movdqa % xmm3, % xmm10 # qhasm: h2 = h2 ^ h14 # asm 1: vpxor <h14=reg128#4,<h2=reg128#10,>h2=reg128#10 # asm 2: vpxor <h14=%xmm3,<h2=%xmm9,>h2=%xmm9 vpxor % xmm3, % xmm9, % xmm9 # qhasm: h4 = h4 ^ h14 # asm 1: vpxor <h14=reg128#4,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor 
<h14=%xmm3,<h4=%xmm12,>h4=%xmm12 vpxor % xmm3, % xmm12, % xmm12 # qhasm: h5 = h5 ^ h14 # asm 1: vpxor <h14=reg128#4,<h5=reg128#12,>h5=reg128#4 # asm 2: vpxor <h14=%xmm3,<h5=%xmm11,>h5=%xmm3 vpxor % xmm3, % xmm11, % xmm3 # qhasm: h13 = h13 ^ mem128[ ptr + 208 ] # asm 1: vpxor 208(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 208(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 208( % r8), % xmm4, % xmm4 # qhasm: h13 = h13 ^ mem128[ ptr + 416 ] # asm 1: vpxor 416(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 416(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 416( % r8), % xmm4, % xmm4 # qhasm: h0 = h13 # asm 1: movdqa <h13=reg128#5,>h0=reg128#12 # asm 2: movdqa <h13=%xmm4,>h0=%xmm11 movdqa % xmm4, % xmm11 # qhasm: h1 = h1 ^ h13 # asm 1: vpxor <h13=reg128#5,<h1=reg128#11,>h1=reg128#11 # asm 2: vpxor <h13=%xmm4,<h1=%xmm10,>h1=%xmm10 vpxor % xmm4, % xmm10, % xmm10 # qhasm: h3 = h3 ^ h13 # asm 1: vpxor <h13=reg128#5,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h13=%xmm4,<h3=%xmm13,>h3=%xmm13 vpxor % xmm4, % xmm13, % xmm13 # qhasm: h4 = h4 ^ h13 # asm 1: vpxor <h13=reg128#5,<h4=reg128#13,>h4=reg128#5 # asm 2: vpxor <h13=%xmm4,<h4=%xmm12,>h4=%xmm4 vpxor % xmm4, % xmm12, % xmm4 # qhasm: h12 = h12 ^ mem128[ ptr + 384 ] # asm 1: vpxor 384(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 384(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 384( % r8), % xmm2, % xmm2 # qhasm: h12 = h12 ^ mem128[ ptr + 176 ] # asm 1: vpxor 176(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 176(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 176( % r8), % xmm2, % xmm2 # qhasm: mem128[ input_0 + 192 ] = h12 # asm 1: movdqu <h12=reg128#3,192(<input_0=int64#1) # asm 2: movdqu <h12=%xmm2,192(<input_0=%rdi) movdqu % xmm2, 192( % rdi) # qhasm: h11 = h11 ^ mem128[ ptr + 352 ] # asm 1: vpxor 352(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 352(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 352( % r8), % xmm1, % xmm1 # qhasm: h11 = h11 ^ mem128[ ptr + 144 ] # asm 1: vpxor 144(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 144(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 144( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 176 ] = h11 # asm 1: movdqu <h11=reg128#2,176(<input_0=int64#1) # asm 2: movdqu <h11=%xmm1,176(<input_0=%rdi) movdqu % xmm1, 176( % rdi) # qhasm: h10 = h10 ^ mem128[ ptr + 320 ] # asm 1: vpxor 320(<ptr=int64#5),<h10=reg128#6,>h10=reg128#2 # asm 2: vpxor 320(<ptr=%r8),<h10=%xmm5,>h10=%xmm1 vpxor 320( % r8), % xmm5, % xmm1 # qhasm: h10 = h10 ^ mem128[ ptr + 112 ] # asm 1: vpxor 112(<ptr=int64#5),<h10=reg128#2,>h10=reg128#2 # asm 2: vpxor 112(<ptr=%r8),<h10=%xmm1,>h10=%xmm1 vpxor 112( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 160 ] = h10 # asm 1: movdqu <h10=reg128#2,160(<input_0=int64#1) # asm 2: movdqu <h10=%xmm1,160(<input_0=%rdi) movdqu % xmm1, 160( % rdi) # qhasm: h9 = h9 ^ mem128[ ptr + 288 ] # asm 1: vpxor 288(<ptr=int64#5),<h9=reg128#7,>h9=reg128#2 # asm 2: vpxor 288(<ptr=%r8),<h9=%xmm6,>h9=%xmm1 vpxor 288( % r8), % xmm6, % xmm1 # qhasm: h9 = h9 ^ mem128[ ptr + 80 ] # asm 1: vpxor 80(<ptr=int64#5),<h9=reg128#2,>h9=reg128#2 # asm 2: vpxor 80(<ptr=%r8),<h9=%xmm1,>h9=%xmm1 vpxor 80( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 144 ] = h9 # asm 1: movdqu <h9=reg128#2,144(<input_0=int64#1) # asm 2: movdqu <h9=%xmm1,144(<input_0=%rdi) movdqu % xmm1, 144( % rdi) # qhasm: h8 = h8 ^ mem128[ ptr + 256 ] # asm 1: vpxor 256(<ptr=int64#5),<h8=reg128#8,>h8=reg128#2 # asm 2: vpxor 256(<ptr=%r8),<h8=%xmm7,>h8=%xmm1 vpxor 256( % r8), % xmm7, % xmm1 # qhasm: h8 = h8 ^ mem128[ ptr + 48 ] # asm 1: vpxor 
48(<ptr=int64#5),<h8=reg128#2,>h8=reg128#2 # asm 2: vpxor 48(<ptr=%r8),<h8=%xmm1,>h8=%xmm1 vpxor 48( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 128 ] = h8 # asm 1: movdqu <h8=reg128#2,128(<input_0=int64#1) # asm 2: movdqu <h8=%xmm1,128(<input_0=%rdi) movdqu % xmm1, 128( % rdi) # qhasm: h7 = h7 ^ mem128[ ptr + 224 ] # asm 1: vpxor 224(<ptr=int64#5),<h7=reg128#9,>h7=reg128#2 # asm 2: vpxor 224(<ptr=%r8),<h7=%xmm8,>h7=%xmm1 vpxor 224( % r8), % xmm8, % xmm1 # qhasm: h7 = h7 ^ mem128[ ptr + 16 ] # asm 1: vpxor 16(<ptr=int64#5),<h7=reg128#2,>h7=reg128#2 # asm 2: vpxor 16(<ptr=%r8),<h7=%xmm1,>h7=%xmm1 vpxor 16( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 112 ] = h7 # asm 1: movdqu <h7=reg128#2,112(<input_0=int64#1) # asm 2: movdqu <h7=%xmm1,112(<input_0=%rdi) movdqu % xmm1, 112( % rdi) # qhasm: h6 = h6 ^ mem128[ ptr + 192 ] # asm 1: vpxor 192(<ptr=int64#5),<h6=reg128#1,>h6=reg128#1 # asm 2: vpxor 192(<ptr=%r8),<h6=%xmm0,>h6=%xmm0 vpxor 192( % r8), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 96 ] = h6 # asm 1: movdqu <h6=reg128#1,96(<input_0=int64#1) # asm 2: movdqu <h6=%xmm0,96(<input_0=%rdi) movdqu % xmm0, 96( % rdi) # qhasm: h5 = h5 ^ mem128[ ptr + 160 ] # asm 1: vpxor 160(<ptr=int64#5),<h5=reg128#4,>h5=reg128#1 # asm 2: vpxor 160(<ptr=%r8),<h5=%xmm3,>h5=%xmm0 vpxor 160( % r8), % xmm3, % xmm0 # qhasm: mem128[ input_0 + 80 ] = h5 # asm 1: movdqu <h5=reg128#1,80(<input_0=int64#1) # asm 2: movdqu <h5=%xmm0,80(<input_0=%rdi) movdqu % xmm0, 80( % rdi) # qhasm: h4 = h4 ^ mem128[ ptr + 128 ] # asm 1: vpxor 128(<ptr=int64#5),<h4=reg128#5,>h4=reg128#1 # asm 2: vpxor 128(<ptr=%r8),<h4=%xmm4,>h4=%xmm0 vpxor 128( % r8), % xmm4, % xmm0 # qhasm: mem128[ input_0 + 64 ] = h4 # asm 1: movdqu <h4=reg128#1,64(<input_0=int64#1) # asm 2: movdqu <h4=%xmm0,64(<input_0=%rdi) movdqu % xmm0, 64( % rdi) # qhasm: h3 = h3 ^ mem128[ ptr + 96 ] # asm 1: vpxor 96(<ptr=int64#5),<h3=reg128#14,>h3=reg128#1 # asm 2: vpxor 96(<ptr=%r8),<h3=%xmm13,>h3=%xmm0 vpxor 96( % r8), % xmm13, % xmm0 # qhasm: mem128[ input_0 + 48 ] = h3 # asm 1: movdqu <h3=reg128#1,48(<input_0=int64#1) # asm 2: movdqu <h3=%xmm0,48(<input_0=%rdi) movdqu % xmm0, 48( % rdi) # qhasm: h2 = h2 ^ mem128[ ptr + 64 ] # asm 1: vpxor 64(<ptr=int64#5),<h2=reg128#10,>h2=reg128#1 # asm 2: vpxor 64(<ptr=%r8),<h2=%xmm9,>h2=%xmm0 vpxor 64( % r8), % xmm9, % xmm0 # qhasm: mem128[ input_0 + 32 ] = h2 # asm 1: movdqu <h2=reg128#1,32(<input_0=int64#1) # asm 2: movdqu <h2=%xmm0,32(<input_0=%rdi) movdqu % xmm0, 32( % rdi) # qhasm: h1 = h1 ^ mem128[ ptr + 32 ] # asm 1: vpxor 32(<ptr=int64#5),<h1=reg128#11,>h1=reg128#1 # asm 2: vpxor 32(<ptr=%r8),<h1=%xmm10,>h1=%xmm0 vpxor 32( % r8), % xmm10, % xmm0 # qhasm: mem128[ input_0 + 16 ] = h1 # asm 1: movdqu <h1=reg128#1,16(<input_0=int64#1) # asm 2: movdqu <h1=%xmm0,16(<input_0=%rdi) movdqu % xmm0, 16( % rdi) # qhasm: h0 = h0 ^ mem128[ ptr + 0 ] # asm 1: vpxor 0(<ptr=int64#5),<h0=reg128#12,>h0=reg128#1 # asm 2: vpxor 0(<ptr=%r8),<h0=%xmm11,>h0=%xmm0 vpxor 0( % r8), % xmm11, % xmm0 # qhasm: mem128[ input_0 + 0 ] = h0 # asm 1: movdqu <h0=reg128#1,0(<input_0=int64#1) # asm 2: movdqu <h0=%xmm0,0(<input_0=%rdi) movdqu % xmm0, 0( % rdi) # qhasm: return add % r11, % rsp ret
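The routine ending above appears to be the tail of a bitsliced multiplication in GF(2^13): the vpand/vpxor ladder accumulates the 25 coefficients of a carry-less schoolbook product into a scratch buffer, and the movdqu/vpxor epilogue folds the top twelve coefficients (h13..h24) back into h0..h12 using the field identity x^13 = x^4 + x^3 + x + 1 (observe that h24 feeds exactly h15, h14, h12 and h11). A minimal C sketch of the same computation, assuming a plain vec128 lane type; vec128_mul_sketch, vxor and vand are illustrative names, not the crate's API, and the asm's interleaved scratch-buffer layout is ignored:

#include <stdint.h>

typedef struct { uint64_t w[2]; } vec128;            /* one 128-bit bitsliced lane */

static vec128 vxor(vec128 a, vec128 b) { a.w[0] ^= b.w[0]; a.w[1] ^= b.w[1]; return a; }
static vec128 vand(vec128 a, vec128 b) { a.w[0] &= b.w[0]; a.w[1] &= b.w[1]; return a; }

/* Bitsliced GF(2^13) multiply with f(x) = x^13 + x^4 + x^3 + x + 1 (sketch). */
void vec128_mul_sketch(vec128 h[13], const vec128 a[13], const vec128 b[13]) {
    vec128 buf[25] = {0};
    for (int i = 0; i < 13; i++)                     /* carry-less schoolbook product */
        for (int j = 0; j < 13; j++)
            buf[i + j] = vxor(buf[i + j], vand(a[i], b[j]));
    for (int i = 24; i >= 13; i--) {                 /* fold top half via x^13 = x^4+x^3+x+1 */
        buf[i - 9]  = vxor(buf[i - 9],  buf[i]);     /* contributes at +4 */
        buf[i - 10] = vxor(buf[i - 10], buf[i]);     /* contributes at +3 */
        buf[i - 12] = vxor(buf[i - 12], buf[i]);     /* contributes at +1 */
        buf[i - 13] = vxor(buf[i - 13], buf[i]);     /* contributes at +0 */
    }
    for (int i = 0; i < 13; i++) h[i] = buf[i];
}

AND-then-XOR is the bitsliced analogue of multiply-then-add, so the whole product stays branch-free and constant-time; the asm additionally packs two such multiplications into each 256-bit store, which the sketch does not model.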
mktmansour/MKT-KSA-Geolocation-Security
11,545
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896f/avx2/vec_reduce_asm.S
#include "namespace.h" #define vec_reduce_asm CRYPTO_NAMESPACE(vec_reduce_asm) #define _vec_reduce_asm _CRYPTO_NAMESPACE(vec_reduce_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 t0 # qhasm: int64 t1 # qhasm: int64 c # qhasm: int64 r # qhasm: enter vec_reduce_asm .p2align 5 .global _vec_reduce_asm .global vec_reduce_asm _vec_reduce_asm: vec_reduce_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: r = 0 # asm 1: mov $0,>r=int64#7 # asm 2: mov $0,>r=%rax mov $0, % rax # qhasm: t0 = mem64[ input_0 + 192 ] # asm 1: movq 192(<input_0=int64#1),>t0=int64#2 # asm 2: movq 192(<input_0=%rdi),>t0=%rsi movq 192( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 200 ] # asm 1: movq 200(<input_0=int64#1),>t1=int64#3 # asm 2: movq 200(<input_0=%rdi),>t1=%rdx movq 200( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 176 ] # asm 1: movq 176(<input_0=int64#1),>t0=int64#2 # asm 2: movq 176(<input_0=%rdi),>t0=%rsi movq 176( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 184 ] # asm 1: movq 184(<input_0=int64#1),>t1=int64#3 # asm 2: movq 184(<input_0=%rdi),>t1=%rdx movq 184( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 160 ] # asm 1: movq 160(<input_0=int64#1),>t0=int64#2 # asm 2: movq 160(<input_0=%rdi),>t0=%rsi movq 160( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 168 ] # asm 1: movq 168(<input_0=int64#1),>t1=int64#3 # asm 2: movq 168(<input_0=%rdi),>t1=%rdx movq 168( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 144 ] # asm 1: movq 144(<input_0=int64#1),>t0=int64#2 # asm 2: movq 144(<input_0=%rdi),>t0=%rsi movq 144( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 152 ] # asm 1: movq 152(<input_0=int64#1),>t1=int64#3 # asm 2: movq 152(<input_0=%rdi),>t1=%rdx movq 152( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor 
<t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 128 ] # asm 1: movq 128(<input_0=int64#1),>t0=int64#2 # asm 2: movq 128(<input_0=%rdi),>t0=%rsi movq 128( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 136 ] # asm 1: movq 136(<input_0=int64#1),>t1=int64#3 # asm 2: movq 136(<input_0=%rdi),>t1=%rdx movq 136( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 112 ] # asm 1: movq 112(<input_0=int64#1),>t0=int64#2 # asm 2: movq 112(<input_0=%rdi),>t0=%rsi movq 112( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 120 ] # asm 1: movq 120(<input_0=int64#1),>t1=int64#3 # asm 2: movq 120(<input_0=%rdi),>t1=%rdx movq 120( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 96 ] # asm 1: movq 96(<input_0=int64#1),>t0=int64#2 # asm 2: movq 96(<input_0=%rdi),>t0=%rsi movq 96( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 104 ] # asm 1: movq 104(<input_0=int64#1),>t1=int64#3 # asm 2: movq 104(<input_0=%rdi),>t1=%rdx movq 104( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 80 ] # asm 1: movq 80(<input_0=int64#1),>t0=int64#2 # asm 2: movq 80(<input_0=%rdi),>t0=%rsi movq 80( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 88 ] # asm 1: movq 88(<input_0=int64#1),>t1=int64#3 # asm 2: movq 88(<input_0=%rdi),>t1=%rdx movq 88( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or 
<c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 64 ] # asm 1: movq 64(<input_0=int64#1),>t0=int64#2 # asm 2: movq 64(<input_0=%rdi),>t0=%rsi movq 64( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 72 ] # asm 1: movq 72(<input_0=int64#1),>t1=int64#3 # asm 2: movq 72(<input_0=%rdi),>t1=%rdx movq 72( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 48 ] # asm 1: movq 48(<input_0=int64#1),>t0=int64#2 # asm 2: movq 48(<input_0=%rdi),>t0=%rsi movq 48( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 56 ] # asm 1: movq 56(<input_0=int64#1),>t1=int64#3 # asm 2: movq 56(<input_0=%rdi),>t1=%rdx movq 56( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 32 ] # asm 1: movq 32(<input_0=int64#1),>t0=int64#2 # asm 2: movq 32(<input_0=%rdi),>t0=%rsi movq 32( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 40 ] # asm 1: movq 40(<input_0=int64#1),>t1=int64#3 # asm 2: movq 40(<input_0=%rdi),>t1=%rdx movq 40( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 16 ] # asm 1: movq 16(<input_0=int64#1),>t0=int64#2 # asm 2: movq 16(<input_0=%rdi),>t0=%rsi movq 16( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 24 ] # asm 1: movq 24(<input_0=int64#1),>t1=int64#3 # asm 2: movq 24(<input_0=%rdi),>t1=%rdx movq 24( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>t0=int64#2 # asm 2: movq 0(<input_0=%rdi),>t0=%rsi movq 0( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>t1=int64#1 # asm 2: movq 8(<input_0=%rdi),>t1=%rdi movq 8( % rdi), % rdi # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#1,<t0=int64#2 # asm 2: xor 
<t1=%rdi,<t0=%rsi xor % rdi, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#1 # asm 2: popcnt <t0=%rsi, >c=%rdi popcnt % rsi, % rdi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#1d # asm 2: and $1,<c=%edi and $1, % edi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#1,<r=int64#7 # asm 2: or <c=%rdi,<r=%rax or % rdi, % rax # qhasm: return r add % r11, % rsp ret
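vec_reduce_asm collapses a bitsliced element of thirteen 128-bit lanes into one 13-bit scalar: for each lane it XORs the two 64-bit halves, takes the parity with popcnt followed by and $1, and shifts the bit into rax, walking from byte offset 192 down to 0 so that lane i lands in bit i of the result. A rough C equivalent, assuming the GCC/Clang __builtin_popcountll intrinsic; the function name is illustrative:

#include <stdint.h>

/* Bit i of the result is the parity (XOR of all 128 bits) of lane i. */
uint64_t vec_reduce_sketch(const uint64_t in[26]) {  /* 13 lanes x two 64-bit words */
    uint64_t r = 0;
    for (int i = 12; i >= 0; i--) {                  /* highest lane first, as in the asm */
        uint64_t t = in[2 * i] ^ in[2 * i + 1];
        r = (r << 1) | (uint64_t)(__builtin_popcountll(t) & 1);
    }
    return r;
}

The popcnt-and-mask idiom gives a branch-free parity, so the reduction runs in time independent of the secret lane contents.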
mktmansour/MKT-KSA-Geolocation-Security
17,918
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896f/avx2/syndrome_asm.S
#include "namespace.h" #define syndrome_asm CRYPTO_NAMESPACE(syndrome_asm) #define _syndrome_asm _CRYPTO_NAMESPACE(syndrome_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 b64 # qhasm: int64 synd # qhasm: int64 addr # qhasm: int64 c # qhasm: int64 c_all # qhasm: int64 row # qhasm: int64 p # qhasm: int64 e # qhasm: int64 s # qhasm: reg256 pp # qhasm: reg256 ee # qhasm: reg256 ss # qhasm: int64 buf_ptr # qhasm: stack256 buf # qhasm: enter syndrome_asm .p2align 5 .global _syndrome_asm .global syndrome_asm _syndrome_asm: syndrome_asm: mov % rsp, % r11 and $31, % r11 add $32, % r11 sub % r11, % rsp # qhasm: input_1 += 523740 # asm 1: add $523740,<input_1=int64#2 # asm 2: add $523740,<input_1=%rsi add $523740, % rsi # qhasm: buf_ptr = &buf # asm 1: leaq <buf=stack256#1,>buf_ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>buf_ptr=%rcx leaq 0( % rsp), % rcx # qhasm: row = 1248 # asm 1: mov $1248,>row=int64#5 # asm 2: mov $1248,>row=%r8 mov $1248, % r8 # qhasm: loop: ._loop: # qhasm: row -= 1 # asm 1: sub $1,<row=int64#5 # asm 2: sub $1,<row=%r8 sub $1, % r8 # qhasm: ss = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>ss=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>ss=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: ee = mem256[ input_2 + 156 ] # asm 1: vmovupd 156(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 156(<input_2=%rdx),>ee=%ymm1 vmovupd 156( % rdx), % ymm1 # qhasm: ss &= ee # asm 1: vpand <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpand <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpand % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 32(<input_1=%rsi),>pp=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 188 ] # asm 1: vmovupd 188(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 188(<input_2=%rdx),>ee=%ymm2 vmovupd 188( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 64(<input_1=%rsi),>pp=%ymm1 vmovupd 64( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 220 ] # asm 1: vmovupd 220(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 220(<input_2=%rdx),>ee=%ymm2 vmovupd 220( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 96(<input_1=%rsi),>pp=%ymm1 vmovupd 96( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 252 ] # asm 1: vmovupd 252(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 252(<input_2=%rdx),>ee=%ymm2 vmovupd 252( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % 
ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 128(<input_1=%rsi),>pp=%ymm1 vmovupd 128( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 284 ] # asm 1: vmovupd 284(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 284(<input_2=%rdx),>ee=%ymm2 vmovupd 284( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 160(<input_1=%rsi),>pp=%ymm1 vmovupd 160( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 316 ] # asm 1: vmovupd 316(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 316(<input_2=%rdx),>ee=%ymm2 vmovupd 316( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 192(<input_1=%rsi),>pp=%ymm1 vmovupd 192( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 348 ] # asm 1: vmovupd 348(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 348(<input_2=%rdx),>ee=%ymm2 vmovupd 348( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 224(<input_1=%rsi),>pp=%ymm1 vmovupd 224( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 380 ] # asm 1: vmovupd 380(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 380(<input_2=%rdx),>ee=%ymm2 vmovupd 380( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 256(<input_1=%rsi),>pp=%ymm1 vmovupd 256( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 412 ] # asm 1: vmovupd 412(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 412(<input_2=%rdx),>ee=%ymm2 vmovupd 412( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 288(<input_1=%rsi),>pp=%ymm1 vmovupd 288( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 444 ] # asm 1: vmovupd 444(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 
444(<input_2=%rdx),>ee=%ymm2 vmovupd 444( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 320(<input_1=%rsi),>pp=%ymm1 vmovupd 320( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 476 ] # asm 1: vmovupd 476(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 476(<input_2=%rdx),>ee=%ymm2 vmovupd 476( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 352(<input_1=%rsi),>pp=%ymm1 vmovupd 352( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 508 ] # asm 1: vmovupd 508(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 508(<input_2=%rdx),>ee=%ymm2 vmovupd 508( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>pp=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 540 ] # asm 1: vmovupd 540(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 540(<input_2=%rdx),>ee=%ymm2 vmovupd 540( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: buf = ss # asm 1: vmovapd <ss=reg256#1,>buf=stack256#1 # asm 2: vmovapd <ss=%ymm0,>buf=0(%rsp) vmovapd % ymm0, 0( % rsp) # qhasm: s = *(uint32 *)(input_1 + 416) # asm 1: movl 416(<input_1=int64#2),>s=int64#6d # asm 2: movl 416(<input_1=%rsi),>s=%r9d movl 416( % rsi), % r9d # qhasm: e = *(uint32 *)(input_2 + 572) # asm 1: movl 572(<input_2=int64#3),>e=int64#7d # asm 2: movl 572(<input_2=%rdx),>e=%eax movl 572( % rdx), % eax # qhasm: s &= e # asm 1: and <e=int64#7,<s=int64#6 # asm 2: and <e=%rax,<s=%r9 and % rax, % r9 # qhasm: c_all = count(s) # asm 1: popcnt <s=int64#6, >c_all=int64#6 # asm 2: popcnt <s=%r9, >c_all=%r9 popcnt % r9, % r9 # qhasm: b64 = mem64[ buf_ptr + 0 ] # asm 1: movq 0(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 0(<buf_ptr=%rcx),>b64=%rax movq 0( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 8 ] # asm 1: movq 8(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 8(<buf_ptr=%rcx),>b64=%rax movq 8( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor 
<c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 16 ] # asm 1: movq 16(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 16(<buf_ptr=%rcx),>b64=%rax movq 16( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 24 ] # asm 1: movq 24(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 24(<buf_ptr=%rcx),>b64=%rax movq 24( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: addr = row # asm 1: mov <row=int64#5,>addr=int64#7 # asm 2: mov <row=%r8,>addr=%rax mov % r8, % rax # qhasm: (uint64) addr >>= 3 # asm 1: shr $3,<addr=int64#7 # asm 2: shr $3,<addr=%rax shr $3, % rax # qhasm: addr += input_0 # asm 1: add <input_0=int64#1,<addr=int64#7 # asm 2: add <input_0=%rdi,<addr=%rax add % rdi, % rax # qhasm: synd = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#7),>synd=int64#8 # asm 2: movzbq 0(<addr=%rax),>synd=%r10 movzbq 0( % rax), % r10 # qhasm: synd <<= 1 # asm 1: shl $1,<synd=int64#8 # asm 2: shl $1,<synd=%r10 shl $1, % r10 # qhasm: (uint32) c_all &= 1 # asm 1: and $1,<c_all=int64#6d # asm 2: and $1,<c_all=%r9d and $1, % r9d # qhasm: synd |= c_all # asm 1: or <c_all=int64#6,<synd=int64#8 # asm 2: or <c_all=%r9,<synd=%r10 or % r9, % r10 # qhasm: *(uint8 *) (addr + 0) = synd # asm 1: movb <synd=int64#8b,0(<addr=int64#7) # asm 2: movb <synd=%r10b,0(<addr=%rax) movb % r10b, 0( % rax) # qhasm: input_1 -= 420 # asm 1: sub $420,<input_1=int64#2 # asm 2: sub $420,<input_1=%rsi sub $420, % rsi # qhasm: =? 
row-0 # asm 1: cmp $0,<row=int64#5 # asm 2: cmp $0,<row=%r8 cmp $0, % r8 # comment:fp stack unchanged by jump # qhasm: goto loop if != jne ._loop # qhasm: ss = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 0(<input_0=%rdi),>ss=%ymm0 vmovupd 0( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 0(<input_2=%rdx),>ee=%ymm1 vmovupd 0( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 0 ] = ss # asm 1: vmovupd <ss=reg256#1,0(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,0(<input_0=%rdi) vmovupd % ymm0, 0( % rdi) # qhasm: ss = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 32(<input_0=%rdi),>ss=%ymm0 vmovupd 32( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 32(<input_2=%rdx),>ee=%ymm1 vmovupd 32( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 32 ] = ss # asm 1: vmovupd <ss=reg256#1,32(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,32(<input_0=%rdi) vmovupd % ymm0, 32( % rdi) # qhasm: ss = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 64(<input_0=%rdi),>ss=%ymm0 vmovupd 64( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 64(<input_2=%rdx),>ee=%ymm1 vmovupd 64( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 64 ] = ss # asm 1: vmovupd <ss=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: ss = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 96(<input_0=%rdi),>ss=%ymm0 vmovupd 96( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 96 ] # asm 1: vmovupd 96(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 96(<input_2=%rdx),>ee=%ymm1 vmovupd 96( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 96 ] = ss # asm 1: vmovupd <ss=reg256#1,96(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,96(<input_0=%rdi) vmovupd % ymm0, 96( % rdi) # qhasm: s = mem64[ input_0 + 128 ] # asm 1: movq 128(<input_0=int64#1),>s=int64#2 # asm 2: movq 128(<input_0=%rdi),>s=%rsi movq 128( % rdi), % rsi # qhasm: e = mem64[ input_2 + 128 ] # asm 1: movq 128(<input_2=int64#3),>e=int64#4 # asm 2: movq 128(<input_2=%rdx),>e=%rcx movq 128( % rdx), % rcx # qhasm: s ^= e # asm 1: xor <e=int64#4,<s=int64#2 # asm 2: xor <e=%rcx,<s=%rsi xor % rcx, % rsi # qhasm: mem64[ input_0 + 128 ] = s # asm 1: movq <s=int64#2,128(<input_0=int64#1) # asm 2: movq <s=%rsi,128(<input_0=%rdi) movq % rsi, 128( % rdi) # qhasm: s = mem64[ input_0 + 136 ] # asm 1: movq 136(<input_0=int64#1),>s=int64#2 # asm 2: movq 136(<input_0=%rdi),>s=%rsi movq 136( % rdi), % rsi # qhasm: e = mem64[ input_2 + 136 ] # asm 1: movq 136(<input_2=int64#3),>e=int64#4 # asm 2: movq 136(<input_2=%rdx),>e=%rcx movq 136( % rdx), % rcx # qhasm: s ^= e # asm 1: xor <e=int64#4,<s=int64#2 # asm 2: xor <e=%rcx,<s=%rsi 
xor % rcx, % rsi # qhasm: mem64[ input_0 + 136 ] = s # asm 1: movq <s=int64#2,136(<input_0=int64#1) # asm 2: movq <s=%rsi,136(<input_0=%rdi) movq % rsi, 136( % rdi) # qhasm: s = mem64[ input_0 + 144 ] # asm 1: movq 144(<input_0=int64#1),>s=int64#2 # asm 2: movq 144(<input_0=%rdi),>s=%rsi movq 144( % rdi), % rsi # qhasm: e = mem64[ input_2 + 144 ] # asm 1: movq 144(<input_2=int64#3),>e=int64#4 # asm 2: movq 144(<input_2=%rdx),>e=%rcx movq 144( % rdx), % rcx # qhasm: s ^= e # asm 1: xor <e=int64#4,<s=int64#2 # asm 2: xor <e=%rcx,<s=%rsi xor % rcx, % rsi # qhasm: mem64[ input_0 + 144 ] = s # asm 1: movq <s=int64#2,144(<input_0=int64#1) # asm 2: movq <s=%rsi,144(<input_0=%rdi) movq % rsi, 144( % rdi) # qhasm: s = *(uint32 *)( input_0 + 152 ) # asm 1: movl 152(<input_0=int64#1),>s=int64#2d # asm 2: movl 152(<input_0=%rdi),>s=%esi movl 152( % rdi), % esi # qhasm: e = *(uint32 *)( input_2 + 152 ) # asm 1: movl 152(<input_2=int64#3),>e=int64#3d # asm 2: movl 152(<input_2=%rdx),>e=%edx movl 152( % rdx), % edx # qhasm: s ^= e # asm 1: xor <e=int64#3,<s=int64#2 # asm 2: xor <e=%rdx,<s=%rsi xor % rdx, % rsi # qhasm: *(uint32 *)( input_0 + 152 ) = s # asm 1: movl <s=int64#2d,152(<input_0=int64#1) # asm 2: movl <s=%esi,152(<input_0=%rdi) movl % esi, 152( % rdi) # qhasm: return add % r11, % rsp ret
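syndrome_asm computes s = e_head XOR T*e_tail for a systematic public key [I | T]: it starts at the last of 1248 matrix rows (input_1 += 523740 = 1247 * 420), and each loop iteration ANDs one 420-byte row of T (thirteen ymm loads plus a 32-bit tail) against the tail of the error vector, reduces the result to a single parity bit with popcnt, and shifts that bit into the output byte at row/8; the epilogue then XORs the leading 156 bytes of e into the syndrome for the identity block. A byte-oriented C sketch under those assumptions (the row width 420 and row count 1248 are read off the asm constants; the names are illustrative):

#include <stdint.h>
#include <string.h>

#define PK_NROWS   1248                 /* rows of the parity-check matrix */
#define SYND_BYTES (PK_NROWS / 8)       /* 156 */
#define ROW_BYTES  420                  /* bytes per row of T, from the 420-byte stride */

/* s = e_head XOR T*e_tail over GF(2); one parity bit per matrix row. */
void syndrome_sketch(uint8_t *s, const uint8_t *T, const uint8_t *e) {
    memset(s, 0, SYND_BYTES);           /* the asm clears each byte implicitly by shifting */
    for (int row = 0; row < PK_NROWS; row++) {
        uint8_t b = 0;
        for (int j = 0; j < ROW_BYTES; j++)          /* row-of-T AND error-vector tail */
            b ^= T[(size_t)row * ROW_BYTES + j] & e[SYND_BYTES + j];
        b ^= b >> 4; b ^= b >> 2; b ^= b >> 1;       /* fold to byte parity */
        s[row / 8] |= (uint8_t)((b & 1) << (row % 8));
    }
    for (int j = 0; j < SYND_BYTES; j++)             /* identity block of [I | T] */
        s[j] ^= e[j];
}

As in the two routines above, every row is processed unconditionally with AND/XOR/popcount, so the syndrome computation leaks no timing information about the error vector.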
mktmansour/MKT-KSA-Geolocation-Security
254,430
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896f/avx2/transpose_64x128_sp_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x128_sp_asm CRYPTO_NAMESPACE(transpose_64x128_sp_asm) #define _transpose_64x128_sp_asm _CRYPTO_NAMESPACE(transpose_64x128_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 x0 # qhasm: reg128 x1 # qhasm: reg128 x2 # qhasm: reg128 x3 # qhasm: reg128 x4 # qhasm: reg128 x5 # qhasm: reg128 x6 # qhasm: reg128 x7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x128_sp_asm .p2align 5 .global _transpose_64x128_sp_asm .global transpose_64x128_sp_asm _transpose_64x128_sp_asm: transpose_64x128_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: x0 = mem128[ input_0 + 0 ] # asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6 movdqu 0( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 128 ] # asm 1: movdqu 128(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 128(<input_0=%rdi),>x1=%xmm7 movdqu 128( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 256 ] # asm 1: movdqu 
256(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 256(<input_0=%rdi),>x2=%xmm8 movdqu 256( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 384 ] # asm 1: movdqu 384(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 384(<input_0=%rdi),>x3=%xmm9 movdqu 384( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 512 ] # asm 1: movdqu 512(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 512(<input_0=%rdi),>x4=%xmm10 movdqu 512( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 640(<input_0=%rdi),>x5=%xmm11 movdqu 640( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 768(<input_0=%rdi),>x6=%xmm12 movdqu 768( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 896(<input_0=%rdi),>x7=%xmm13 movdqu 896( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm11,>x2=%xmm11
# tail of the 64x64 bit transpose of the column at byte offset 0
	vpor	%xmm15, %xmm11, %xmm11	# x2 = v00 | v10
	vpor	%xmm12, %xmm8, %xmm8	# x6 = v01 | v11

# interleave x3/x7 at width 32 (mask0/mask1)
	vpand	%xmm0, %xmm9, %xmm12	# v00 = x3 & mask0
	vpsllq	$32, %xmm13, %xmm15	# v10 = x7 << 32
	vpsrlq	$32, %xmm9, %xmm9	# v01 = x3 >> 32
	vpand	%xmm1, %xmm13, %xmm13	# v11 = x7 & mask1
	vpor	%xmm15, %xmm12, %xmm12	# x3 = v00 | v10
	vpor	%xmm13, %xmm9, %xmm9	# x7 = v01 | v11

# interleave x0/x2, x1/x3, x4/x6, x5/x7 at width 16 (mask2/mask3)
	vpand	%xmm2, %xmm14, %xmm13	# v00 = x0 & mask2
	vpslld	$16, %xmm11, %xmm15	# v10 = x2 << 16
	vpsrld	$16, %xmm14, %xmm14	# v01 = x0 >> 16
	vpand	%xmm3, %xmm11, %xmm11	# v11 = x2 & mask3
	vpor	%xmm15, %xmm13, %xmm13	# x0 = v00 | v10
	vpor	%xmm11, %xmm14, %xmm11	# x2 = v01 | v11
	vpand	%xmm2, %xmm10, %xmm14	# v00 = x1 & mask2
	vpslld	$16, %xmm12, %xmm15	# v10 = x3 << 16
	vpsrld	$16, %xmm10, %xmm10	# v01 = x1 >> 16
	vpand	%xmm3, %xmm12, %xmm12	# v11 = x3 & mask3
	vpor	%xmm15, %xmm14, %xmm14	# x1 = v00 | v10
	vpor	%xmm12, %xmm10, %xmm10	# x3 = v01 | v11
	vpand	%xmm2, %xmm6, %xmm12	# v00 = x4 & mask2
	vpslld	$16, %xmm8, %xmm15	# v10 = x6 << 16
	vpsrld	$16, %xmm6, %xmm6	# v01 = x4 >> 16
	vpand	%xmm3, %xmm8, %xmm8	# v11 = x6 & mask3
	vpor	%xmm15, %xmm12, %xmm12	# x4 = v00 | v10
	vpor	%xmm8, %xmm6, %xmm6	# x6 = v01 | v11
	vpand	%xmm2, %xmm7, %xmm8	# v00 = x5 & mask2
	vpslld	$16, %xmm9, %xmm15	# v10 = x7 << 16
	vpsrld	$16, %xmm7, %xmm7	# v01 = x5 >> 16
	vpand	%xmm3, %xmm9, %xmm9	# v11 = x7 & mask3
	vpor	%xmm15, %xmm8, %xmm8	# x5 = v00 | v10
	vpor	%xmm9, %xmm7, %xmm7	# x7 = v01 | v11

# interleave x0/x1, x2/x3, x4/x5, x6/x7 at width 8 (mask4/mask5)
	vpand	%xmm4, %xmm13, %xmm9	# v00 = x0 & mask4
	vpsllw	$8, %xmm14, %xmm15	# v10 = x1 << 8
	vpsrlw	$8, %xmm13, %xmm13	# v01 = x0 >> 8
	vpand	%xmm5, %xmm14, %xmm14	# v11 = x1 & mask5
	vpor	%xmm15, %xmm9, %xmm9	# x0 = v00 | v10
	vpor	%xmm14, %xmm13, %xmm13	# x1 = v01 | v11
	vpand	%xmm4, %xmm11, %xmm14	# v00 = x2 & mask4
	vpsllw	$8, %xmm10, %xmm15	# v10 = x3 << 8
	vpsrlw	$8, %xmm11, %xmm11	# v01 = x2 >> 8
	vpand	%xmm5, %xmm10, %xmm10	# v11 = x3 & mask5
	vpor	%xmm15, %xmm14, %xmm14	# x2 = v00 | v10
	vpor	%xmm10, %xmm11, %xmm10	# x3 = v01 | v11
	vpand	%xmm4, %xmm12, %xmm11	# v00 = x4 & mask4
	vpsllw	$8, %xmm8, %xmm15	# v10 = x5 << 8
	vpsrlw	$8, %xmm12, %xmm12	# v01 = x4 >> 8
	vpand	%xmm5, %xmm8, %xmm8	# v11 = x5 & mask5
	vpor	%xmm15, %xmm11, %xmm11	# x4 = v00 | v10
	vpor	%xmm8, %xmm12, %xmm8	# x5 = v01 | v11
	vpand	%xmm4, %xmm6, %xmm12	# v00 = x6 & mask4
	vpsllw	$8, %xmm7, %xmm15	# v10 = x7 << 8
	vpsrlw	$8, %xmm6, %xmm6	# v01 = x6 >> 8
	vpand	%xmm5, %xmm7, %xmm7	# v11 = x7 & mask5
	vpor	%xmm15, %xmm12, %xmm12	# x6 = v00 | v10
	vpor	%xmm7, %xmm6, %xmm6	# x7 = v01 | v11

# store the transposed column back at stride 128
	movdqu	%xmm9, 0(%rdi)		# x0
	movdqu	%xmm13, 128(%rdi)	# x1
	movdqu	%xmm14, 256(%rdi)	# x2
	movdqu	%xmm10, 384(%rdi)	# x3
	movdqu	%xmm11, 512(%rdi)	# x4
	movdqu	%xmm8, 640(%rdi)	# x5
	movdqu	%xmm12, 768(%rdi)	# x6
	movdqu	%xmm6, 896(%rdi)	# x7
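
# This stretch is one pass of a 64x64 bit-matrix transpose in the style
# of transpose_64x64: each column of eight 128-bit words, taken at a
# stride of 128 bytes, goes through three butterfly (interleave) passes
# at widths 32, 16 and 8.  One pass on a register pair (a, b) with
# shift s computes
#	a' = (a & mask_lo) | (b << s)
#	b' = (a >> s) | (b & mask_hi)
# i.e. it swaps the high s-bit half of each 2s-bit lane of a with the
# low s-bit half of the corresponding lane of b.  mask0..mask5 live in
# %xmm0..%xmm5, loaded before this stretch; they are assumed to hold
# the usual interleave constants (mask0/mask1 the low/high 32-bit
# halves, mask2/mask3 the 16-bit halves, mask4/mask5 the 8-bit halves
# of each lane).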
# the same transpose, applied to the columns at byte offsets 16, 32,
# 48 and 64, via two helper macros: bfly performs one butterfly step on
# a register pair (%xmm14/%xmm15 are scratch), transpose_col transposes
# one full 128-byte-strided column in place

.macro	bfly a, b, mlo, mhi, shl, shr, amt
	vpand	\mlo, \a, %xmm14	# v00 = a & mask_lo
	\shr	$\amt, \a, \a		# v01 = a >> amt
	\shl	$\amt, \b, %xmm15	# v10 = b << amt
	vpand	\mhi, \b, \b		# v11 = b & mask_hi
	vpor	\a, \b, \b		# b = v01 | v11
	vpor	%xmm15, %xmm14, \a	# a = v00 | v10
.endm

.macro	transpose_col base
	movdqu	\base+0(%rdi), %xmm6	# x0
	movdqu	\base+128(%rdi), %xmm7	# x1
	movdqu	\base+256(%rdi), %xmm8	# x2
	movdqu	\base+384(%rdi), %xmm9	# x3
	movdqu	\base+512(%rdi), %xmm10	# x4
	movdqu	\base+640(%rdi), %xmm11	# x5
	movdqu	\base+768(%rdi), %xmm12	# x6
	movdqu	\base+896(%rdi), %xmm13	# x7
	bfly	%xmm6, %xmm10, %xmm0, %xmm1, vpsllq, vpsrlq, 32	# x0/x4
	bfly	%xmm7, %xmm11, %xmm0, %xmm1, vpsllq, vpsrlq, 32	# x1/x5
	bfly	%xmm8, %xmm12, %xmm0, %xmm1, vpsllq, vpsrlq, 32	# x2/x6
	bfly	%xmm9, %xmm13, %xmm0, %xmm1, vpsllq, vpsrlq, 32	# x3/x7
	bfly	%xmm6, %xmm8, %xmm2, %xmm3, vpslld, vpsrld, 16	# x0/x2
	bfly	%xmm7, %xmm9, %xmm2, %xmm3, vpslld, vpsrld, 16	# x1/x3
	bfly	%xmm10, %xmm12, %xmm2, %xmm3, vpslld, vpsrld, 16	# x4/x6
	bfly	%xmm11, %xmm13, %xmm2, %xmm3, vpslld, vpsrld, 16	# x5/x7
	bfly	%xmm6, %xmm7, %xmm4, %xmm5, vpsllw, vpsrlw, 8	# x0/x1
	bfly	%xmm8, %xmm9, %xmm4, %xmm5, vpsllw, vpsrlw, 8	# x2/x3
	bfly	%xmm10, %xmm11, %xmm4, %xmm5, vpsllw, vpsrlw, 8	# x4/x5
	bfly	%xmm12, %xmm13, %xmm4, %xmm5, vpsllw, vpsrlw, 8	# x6/x7
	movdqu	%xmm6, \base+0(%rdi)
	movdqu	%xmm7, \base+128(%rdi)
	movdqu	%xmm8, \base+256(%rdi)
	movdqu	%xmm9, \base+384(%rdi)
	movdqu	%xmm10, \base+512(%rdi)
	movdqu	%xmm11, \base+640(%rdi)
	movdqu	%xmm12, \base+768(%rdi)
	movdqu	%xmm13, \base+896(%rdi)
.endm

	transpose_col	16
	transpose_col	32
	transpose_col	48
	transpose_col	64
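
# Worked width-8 example of one butterfly step, per 16-bit lane:
# a = 0xAABB, b = 0xCCDD give
#	a' = (a & 0x00FF) | (b << 8) = 0xDDBB
#	b' = (a >> 8) | (b & 0xFF00) = 0xCCAA
# so the high byte of a and the low byte of b trade places.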
# column at byte offset 80: load x0..x7 at stride 128
	movdqu	80(%rdi), %xmm6		# x0
	movdqu	208(%rdi), %xmm7	# x1
	movdqu	336(%rdi), %xmm8	# x2
	movdqu	464(%rdi), %xmm9	# x3
	movdqu	592(%rdi), %xmm10	# x4
	movdqu	720(%rdi), %xmm11	# x5
	movdqu	848(%rdi), %xmm12	# x6
	movdqu	976(%rdi), %xmm13	# x7

# interleave x0/x4, x1/x5, x2/x6, x3/x7 at width 32 (mask0/mask1)
	vpand	%xmm0, %xmm6, %xmm14	# v00 = x0 & mask0
	vpsllq	$32, %xmm10, %xmm15	# v10 = x4 << 32
	vpsrlq	$32, %xmm6, %xmm6	# v01 = x0 >> 32
	vpand	%xmm1, %xmm10, %xmm10	# v11 = x4 & mask1
	vpor	%xmm15, %xmm14, %xmm14	# x0 = v00 | v10
	vpor	%xmm10, %xmm6, %xmm6	# x4 = v01 | v11
	vpand	%xmm0, %xmm7, %xmm10	# v00 = x1 & mask0
	vpsllq	$32, %xmm11, %xmm15	# v10 = x5 << 32
	vpsrlq	$32, %xmm7, %xmm7	# v01 = x1 >> 32
	vpand	%xmm1, %xmm11, %xmm11	# v11 = x5 & mask1
	vpor	%xmm15, %xmm10, %xmm10	# x1 = v00 | v10
	vpor	%xmm11, %xmm7, %xmm7	# x5 = v01 | v11
	vpand	%xmm0, %xmm8, %xmm11	# v00 = x2 & mask0
	vpsllq	$32, %xmm12, %xmm15	# v10 = x6 << 32
	vpsrlq	$32, %xmm8, %xmm8	# v01 = x2 >> 32
	vpand	%xmm1, %xmm12, %xmm12	# v11 = x6 & mask1
	vpor	%xmm15, %xmm11, %xmm11	# x2 = v00 | v10
	vpor	%xmm12, %xmm8, %xmm8	# x6 = v01 | v11
	vpand	%xmm0, %xmm9, %xmm12	# v00 = x3 & mask0
	vpsllq	$32, %xmm13, %xmm15	# v10 = x7 << 32
	vpsrlq	$32, %xmm9, %xmm9	# v01 = x3 >> 32
	vpand	%xmm1, %xmm13, %xmm13	# v11 = x7 & mask1
	vpor	%xmm15, %xmm12, %xmm12	# x3 = v00 | v10
	vpor	%xmm13, %xmm9, %xmm9	# x7 = v01 | v11

# interleave x0/x2, x1/x3, x4/x6, x5/x7 at width 16 (mask2/mask3)
	vpand	%xmm2, %xmm14, %xmm13	# v00 = x0 & mask2
	vpslld	$16, %xmm11, %xmm15	# v10 = x2 << 16
	vpsrld	$16, %xmm14, %xmm14	# v01 = x0 >> 16
	vpand	%xmm3, %xmm11, %xmm11	# v11 = x2 & mask3
	vpor	%xmm15, %xmm13, %xmm13	# x0 = v00 | v10
	vpor	%xmm11, %xmm14, %xmm11	# x2 = v01 | v11
	vpand	%xmm2, %xmm10, %xmm14	# v00 = x1 & mask2
	vpslld	$16, %xmm12, %xmm15	# v10 = x3 << 16
	vpsrld	$16, %xmm10, %xmm10	# v01 = x1 >> 16
	vpand	%xmm3, %xmm12, %xmm12	# v11 = x3 & mask3
	vpor	%xmm15, %xmm14, %xmm14	# x1 = v00 | v10
	vpor	%xmm12, %xmm10, %xmm10	# x3 = v01 | v11
	vpand	%xmm2, %xmm6, %xmm12	# v00 = x4 & mask2
	vpslld	$16, %xmm8, %xmm15	# v10 = x6 << 16
	vpsrld	$16, %xmm6, %xmm6	# v01 = x4 >> 16
	vpand	%xmm3, %xmm8, %xmm8	# v11 = x6 & mask3
	vpor	%xmm15, %xmm12, %xmm12	# x4 = v00 | v10
	vpor	%xmm8, %xmm6, %xmm6	# x6 = v01 | v11
	vpand	%xmm2, %xmm7, %xmm8	# v00 = x5 & mask2
	vpslld	$16, %xmm9, %xmm15	# v10 = x7 << 16
	vpsrld	$16, %xmm7, %xmm7	# v01 = x5 >> 16
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand
<mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 
unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 80 ] = x0 # asm 1: movdqu <x0=reg128#10,80(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,80(<input_0=%rdi) movdqu % xmm9, 80( % rdi) # qhasm: mem128[ input_0 + 208 ] = x1 # asm 1: movdqu <x1=reg128#14,208(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,208(<input_0=%rdi) movdqu % xmm13, 208( % rdi) # qhasm: mem128[ input_0 + 336 ] = x2 # asm 1: movdqu <x2=reg128#15,336(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,336(<input_0=%rdi) movdqu % xmm14, 336( % rdi) # qhasm: mem128[ input_0 + 464 ] = x3 # asm 1: movdqu <x3=reg128#11,464(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,464(<input_0=%rdi) movdqu % xmm10, 464( % rdi) # qhasm: mem128[ input_0 + 592 ] = x4 # asm 1: movdqu <x4=reg128#12,592(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,592(<input_0=%rdi) movdqu % xmm11, 592( % rdi) # qhasm: mem128[ input_0 + 720 ] = x5 # asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi) movdqu % xmm8, 720( % rdi) # qhasm: mem128[ input_0 + 848 ] = x6 # asm 1: movdqu <x6=reg128#13,848(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,848(<input_0=%rdi) movdqu % xmm12, 848( % rdi) # qhasm: mem128[ input_0 + 976 ] = x7 # asm 1: movdqu <x7=reg128#7,976(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,976(<input_0=%rdi) movdqu % xmm6, 976( % rdi) # qhasm: x0 = mem128[ input_0 + 96 ] # asm 1: movdqu 96(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 96(<input_0=%rdi),>x0=%xmm6 movdqu 96( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 224 ] # asm 1: movdqu 224(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 224(<input_0=%rdi),>x1=%xmm7 movdqu 224( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 352 ] # asm 1: movdqu 352(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 352(<input_0=%rdi),>x2=%xmm8 movdqu 352( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 480 ] # asm 1: movdqu 480(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 480(<input_0=%rdi),>x3=%xmm9 movdqu 480( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 608 ] # asm 1: movdqu 608(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 608(<input_0=%rdi),>x4=%xmm10 movdqu 608( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 736 ] # asm 1: movdqu 736(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 736(<input_0=%rdi),>x5=%xmm11 movdqu 736( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 864 ] # asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12 movdqu 864( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 992 ] # asm 1: movdqu 992(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 992(<input_0=%rdi),>x7=%xmm13 movdqu 992( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 
1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand 
<mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 
= v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand 
<mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 96 ] = x0 # asm 1: movdqu <x0=reg128#10,96(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,96(<input_0=%rdi) movdqu % xmm9, 96( % rdi) # qhasm: mem128[ input_0 + 224 ] = x1 # asm 1: movdqu <x1=reg128#14,224(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,224(<input_0=%rdi) movdqu % xmm13, 224( % rdi) # qhasm: mem128[ input_0 + 352 ] = x2 # asm 1: movdqu <x2=reg128#15,352(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,352(<input_0=%rdi) movdqu % xmm14, 352( % rdi) # qhasm: mem128[ input_0 + 480 ] = x3 # asm 1: movdqu <x3=reg128#11,480(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,480(<input_0=%rdi) movdqu % xmm10, 480( % rdi) # qhasm: mem128[ input_0 + 608 ] = x4 # asm 1: movdqu <x4=reg128#12,608(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,608(<input_0=%rdi) movdqu % xmm11, 608( % rdi) # qhasm: mem128[ input_0 + 736 ] = x5 # asm 1: movdqu <x5=reg128#9,736(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,736(<input_0=%rdi) movdqu % xmm8, 736( % rdi) # qhasm: mem128[ input_0 + 864 ] = x6 # asm 1: movdqu <x6=reg128#13,864(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi) movdqu % xmm12, 864( % rdi) # qhasm: mem128[ input_0 + 992 ] = x7 # asm 1: movdqu <x7=reg128#7,992(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,992(<input_0=%rdi) movdqu % xmm6, 992( % rdi) # qhasm: x0 = mem128[ input_0 + 112 ] # asm 1: movdqu 112(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 112(<input_0=%rdi),>x0=%xmm6 movdqu 112( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 240 ] # asm 1: movdqu 240(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 240(<input_0=%rdi),>x1=%xmm7 movdqu 240( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 368 ] # asm 1: movdqu 368(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 368(<input_0=%rdi),>x2=%xmm8 movdqu 368( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 496 ] # asm 1: movdqu 496(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 496(<input_0=%rdi),>x3=%xmm9 movdqu 496( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 624 ] # asm 1: movdqu 624(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 624(<input_0=%rdi),>x4=%xmm10 movdqu 624( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 752 ] # asm 1: movdqu 752(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 752(<input_0=%rdi),>x5=%xmm11 movdqu 752( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 880 ] # asm 1: movdqu 880(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 880(<input_0=%rdi),>x6=%xmm12 movdqu 880( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 1008 ] # asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13 movdqu 1008( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand 
<mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#1 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm0 vpand % xmm0, % xmm9, % xmm0 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#13 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm12 vpsllq $32, % xmm13, % xmm12 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#1,>x3=reg128#1 # asm 2: vpor <v10=%xmm12,<v00=%xmm0,>x3=%xmm0 vpor % xmm12, % xmm0, % xmm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#13 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm12 vpslld $16, % xmm11, % xmm12 # qhasm: 4x v01 = 
x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#14 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm13 vpsrld $16, % xmm14, % xmm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#1,>v10=reg128#14 # asm 2: vpslld $16,<x3=%xmm0,>v10=%xmm13 vpslld $16, % xmm0, % xmm13 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#14 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm13 vpslld $16, % xmm8, % xmm13 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#3 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm2 vpand % xmm2, % xmm7, % xmm2 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#2,>v10=reg128#9 # asm 2: vpslld $16,<x7=%xmm1,>v10=%xmm8 vpslld $16, % xmm1, % xmm8 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#9,<v00=reg128#3,>x5=reg128#3 # asm 2: vpor <v10=%xmm8,<v00=%xmm2,>x5=%xmm2 vpor % xmm8, % xmm2, % xmm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4 # asm 2: vpand 
<mask4=%xmm4,<x0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#13,>v10=reg128#8 # asm 2: vpsllw $8,<x1=%xmm12,>v10=%xmm7 vpsllw $8, % xmm12, % xmm7 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#10,>v01=reg128#9 # asm 2: vpsrlw $8,<x0=%xmm9,>v01=%xmm8 vpsrlw $8, % xmm9, % xmm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#1,>v10=reg128#10 # asm 2: vpsllw $8,<x3=%xmm0,>v10=%xmm9 vpsllw $8, % xmm0, % xmm9 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#3,>v10=reg128#12 # asm 2: vpsllw $8,<x5=%xmm2,>v10=%xmm11 vpsllw $8, % xmm2, % xmm11 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#11,>v01=reg128#11 # asm 2: vpsrlw $8,<x4=%xmm10,>v01=%xmm10 vpsrlw $8, % xmm10, % xmm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#5 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm4 vpand % xmm4, % xmm6, % xmm4 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#2,>v10=reg128#11 # asm 2: vpsllw $8,<x7=%xmm1,>v10=%xmm10 vpsllw $8, % xmm1, % xmm10 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#11,<v00=reg128#5,>x6=reg128#5 # asm 2: vpor <v10=%xmm10,<v00=%xmm4,>x6=%xmm4 vpor % xmm10, % xmm4, % xmm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2 # asm 2: vpor 
# <v11=%xmm1,<v01=%xmm6,>x7=%xmm1
vpor % xmm1, % xmm6, % xmm1

# qhasm: mem128[ input_0 + 112 ] = x0
# asm 1: movdqu <x0=reg128#4,112(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm3,112(<input_0=%rdi)
movdqu % xmm3, 112( % rdi)

# qhasm: mem128[ input_0 + 240 ] = x1
# asm 1: movdqu <x1=reg128#8,240(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm7,240(<input_0=%rdi)
movdqu % xmm7, 240( % rdi)

# qhasm: mem128[ input_0 + 368 ] = x2
# asm 1: movdqu <x2=reg128#9,368(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm8,368(<input_0=%rdi)
movdqu % xmm8, 368( % rdi)

# qhasm: mem128[ input_0 + 496 ] = x3
# asm 1: movdqu <x3=reg128#1,496(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm0,496(<input_0=%rdi)
movdqu % xmm0, 496( % rdi)

# qhasm: mem128[ input_0 + 624 ] = x4
# asm 1: movdqu <x4=reg128#10,624(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm9,624(<input_0=%rdi)
movdqu % xmm9, 624( % rdi)

# qhasm: mem128[ input_0 + 752 ] = x5
# asm 1: movdqu <x5=reg128#3,752(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm2,752(<input_0=%rdi)
movdqu % xmm2, 752( % rdi)

# qhasm: mem128[ input_0 + 880 ] = x6
# asm 1: movdqu <x6=reg128#5,880(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm4,880(<input_0=%rdi)
movdqu % xmm4, 880( % rdi)

# qhasm: mem128[ input_0 + 1008 ] = x7
# asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi)
movdqu % xmm1, 1008( % rdi)

# qhasm: mask0 aligned= mem128[ MASK2_0 ]
# asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0
movdqa MASK2_0( % rip), % xmm0

# qhasm: mask1 aligned= mem128[ MASK2_1 ]
# asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1
movdqa MASK2_1( % rip), % xmm1

# qhasm: mask2 aligned= mem128[ MASK1_0 ]
# asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2
movdqa MASK1_0( % rip), % xmm2

# qhasm: mask3 aligned= mem128[ MASK1_1 ]
# asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3
movdqa MASK1_1( % rip), % xmm3

# qhasm: mask4 aligned= mem128[ MASK0_0 ]
# asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4
movdqa MASK0_0( % rip), % xmm4

# qhasm: mask5 aligned= mem128[ MASK0_1 ]
# asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5
movdqa MASK0_1( % rip), % xmm5

# qhasm: x0 = mem128[ input_0 + 0 ]
# asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6
movdqu 0( % rdi), % xmm6

# qhasm: x1 = mem128[ input_0 + 16 ]
# asm 1: movdqu 16(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 16(<input_0=%rdi),>x1=%xmm7
movdqu 16( % rdi), % xmm7

# qhasm: x2 = mem128[ input_0 + 32 ]
# asm 1: movdqu 32(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 32(<input_0=%rdi),>x2=%xmm8
movdqu 32( % rdi), % xmm8

# qhasm: x3 = mem128[ input_0 + 48 ]
# asm 1: movdqu 48(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 48(<input_0=%rdi),>x3=%xmm9
movdqu 48( % rdi), % xmm9

# qhasm: x4 = mem128[ input_0 + 64 ]
# asm 1: movdqu 64(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 64(<input_0=%rdi),>x4=%xmm10
movdqu 64( % rdi), % xmm10

# qhasm: x5 = mem128[ input_0 + 80 ]
# asm 1: movdqu 80(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 80(<input_0=%rdi),>x5=%xmm11
movdqu 80( % rdi), % xmm11

# qhasm: x6 = mem128[ input_0 + 96 ]
# asm 1: movdqu 96(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 96(<input_0=%rdi),>x6=%xmm12
movdqu 96( % rdi), % xmm12

# qhasm: x7 = mem128[ input_0 + 112 ]
# asm 1: movdqu 112(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu
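#
# The movdqa reloads above swap in the finer masks (MASK2_*, MASK1_*,
# MASK0_*: 4-, 2- and 1-bit patterns), and the movdqu loads switch to
# a 16-byte row stride, so the second half of the network repeats the
# same exchange at w = 4, 2 and 1.  In this half the shifted operand
# is masked first and then shifted in place with plain psllq/psrlq;
# illustratively, for w = 4 (M being the low-nibble mask, names not in
# the source):
#
#     new_x0 = (x0 & M) | ((x4 & M) << 4)
#     new_x4 = ((x0 & ~M) >> 4) | (x4 & ~M)
#
# which is the same swap as before, just expressed with 64-bit shifts
# instead of the lane-sized vpsllw/vpslld forms.
#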
112(<input_0=%rdi),>x7=%xmm13 movdqu 112( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % 
xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 
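#
# The psllq $1 / psrlq $1 steps around here appear to be the final
# w = 1 stage: with mask4/mask5 now holding the even/odd-bit patterns
# MASK0_0/MASK0_1 (loaded above), adjacent register pairs exchange
# their odd and even bit lanes, e.g. (illustrative names only):
#
#     new_x0 = (x0 & M) | ((x1 & M) << 1)
#     new_x1 = ((x0 & ~M) >> 1) | (x1 & ~M)
#
# After this stage the block of rows is written back to input_0 and
# the next 128-byte block is processed the same way.
#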
vpor % xmm15, % xmm9, % xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand % xmm4, % xmm10, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand % xmm5, % xmm11, % xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, % xmm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor % xmm15, % xmm14, % xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand % xmm4, % xmm8, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand % xmm5, % xmm12, % xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, % xmm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand % xmm4, % xmm7, % xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand % xmm5, % xmm6, % xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, % xmm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2:
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 0 ] = x0
# asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi)
movdqu %xmm9, 0(%rdi)

# qhasm: mem128[ input_0 + 16 ] = x1
# asm 1: movdqu <x1=reg128#14,16(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,16(<input_0=%rdi)
movdqu %xmm13, 16(%rdi)

# qhasm: mem128[ input_0 + 32 ] = x2
# asm 1: movdqu <x2=reg128#15,32(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,32(<input_0=%rdi)
movdqu %xmm14, 32(%rdi)

# qhasm: mem128[ input_0 + 48 ] = x3
# asm 1: movdqu <x3=reg128#11,48(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,48(<input_0=%rdi)
movdqu %xmm10, 48(%rdi)

# qhasm: mem128[ input_0 + 64 ] = x4
# asm 1: movdqu <x4=reg128#12,64(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,64(<input_0=%rdi)
movdqu %xmm11, 64(%rdi)

# qhasm: mem128[ input_0 + 80 ] = x5
# asm 1: movdqu <x5=reg128#9,80(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,80(<input_0=%rdi)
movdqu %xmm8, 80(%rdi)

# qhasm: mem128[ input_0 + 96 ] = x6
# asm 1: movdqu <x6=reg128#13,96(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,96(<input_0=%rdi)
movdqu %xmm12, 96(%rdi)

# qhasm: mem128[ input_0 + 112 ] = x7
# asm 1: movdqu <x7=reg128#7,112(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,112(<input_0=%rdi)
movdqu %xmm6, 112(%rdi)

# qhasm: x0 = mem128[ input_0 + 128 ]
# asm 1: movdqu 128(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 128(<input_0=%rdi),>x0=%xmm6
movdqu 128(%rdi), %xmm6

# qhasm: x1 = mem128[ input_0 + 144 ]
# asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7
movdqu 144(%rdi), %xmm7

# qhasm: x2 = mem128[ input_0 + 160 ]
# asm 1: movdqu 160(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 160(<input_0=%rdi),>x2=%xmm8
movdqu 160(%rdi), %xmm8

# qhasm: x3 = mem128[ input_0 + 176 ]
# asm 1: movdqu 176(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 176(<input_0=%rdi),>x3=%xmm9
movdqu 176(%rdi), %xmm9

# qhasm: x4 = mem128[ input_0 + 192 ]
# asm 1: movdqu 192(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 192(<input_0=%rdi),>x4=%xmm10
movdqu 192(%rdi), %xmm10

# qhasm: x5 = mem128[ input_0 + 208 ]
# asm 1: movdqu 208(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 208(<input_0=%rdi),>x5=%xmm11
movdqu 208(%rdi), %xmm11

# qhasm: x6 = mem128[ input_0 + 224 ]
# asm 1: movdqu 224(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 224(<input_0=%rdi),>x6=%xmm12
movdqu 224(%rdi), %xmm12

# qhasm: x7 = mem128[ input_0 + 240 ]
# asm 1: movdqu 240(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 240(<input_0=%rdi),>x7=%xmm13
movdqu 240(%rdi), %xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 128 ] = x0
# asm 1: movdqu <x0=reg128#10,128(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,128(<input_0=%rdi)
movdqu %xmm9, 128(%rdi)

# qhasm: mem128[ input_0 + 144 ] = x1
# asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi)
movdqu %xmm13, 144(%rdi)

# qhasm: mem128[ input_0 + 160 ] = x2
# asm 1: movdqu <x2=reg128#15,160(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,160(<input_0=%rdi)
movdqu %xmm14, 160(%rdi)

# qhasm: mem128[ input_0 + 176 ] = x3
# asm 1: movdqu <x3=reg128#11,176(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,176(<input_0=%rdi)
movdqu %xmm10, 176(%rdi)

# qhasm: mem128[ input_0 + 192 ] = x4
# asm 1: movdqu <x4=reg128#12,192(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,192(<input_0=%rdi)
movdqu %xmm11, 192(%rdi)

# qhasm: mem128[ input_0 + 208 ] = x5
# asm 1: movdqu <x5=reg128#9,208(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,208(<input_0=%rdi)
movdqu %xmm8, 208(%rdi)

# qhasm: mem128[ input_0 + 224 ] = x6
# asm 1: movdqu <x6=reg128#13,224(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,224(<input_0=%rdi)
movdqu %xmm12, 224(%rdi)

# qhasm: mem128[ input_0 + 240 ] = x7
# asm 1: movdqu <x7=reg128#7,240(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,240(<input_0=%rdi)
movdqu %xmm6, 240(%rdi)

# qhasm: x0 = mem128[ input_0 + 256 ]
# asm 1: movdqu 256(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 256(<input_0=%rdi),>x0=%xmm6
movdqu 256(%rdi), %xmm6

# qhasm: x1 = mem128[ input_0 + 272 ]
# asm 1: movdqu 272(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 272(<input_0=%rdi),>x1=%xmm7
movdqu 272(%rdi), %xmm7

# qhasm: x2 = mem128[ input_0 + 288 ]
# asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8
movdqu 288(%rdi), %xmm8

# qhasm: x3 = mem128[ input_0 + 304 ]
# asm 1: movdqu 304(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 304(<input_0=%rdi),>x3=%xmm9
movdqu 304(%rdi), %xmm9

# qhasm: x4 = mem128[ input_0 + 320 ]
# asm 1: movdqu 320(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 320(<input_0=%rdi),>x4=%xmm10
movdqu 320(%rdi), %xmm10

# qhasm: x5 = mem128[ input_0 + 336 ]
# asm 1: movdqu 336(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 336(<input_0=%rdi),>x5=%xmm11
movdqu 336(%rdi), %xmm11

# qhasm: x6 = mem128[ input_0 + 352 ]
# asm 1: movdqu 352(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 352(<input_0=%rdi),>x6=%xmm12
movdqu 352(%rdi), %xmm12

# qhasm: x7 = mem128[ input_0 + 368 ]
# asm 1: movdqu 368(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 368(<input_0=%rdi),>x7=%xmm13
movdqu 368(%rdi), %xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 256 ] = x0
# asm 1: movdqu <x0=reg128#10,256(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,256(<input_0=%rdi)
movdqu %xmm9, 256(%rdi)

# qhasm: mem128[ input_0 + 272 ] = x1
# asm 1: movdqu <x1=reg128#14,272(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,272(<input_0=%rdi)
movdqu %xmm13, 272(%rdi)

# qhasm: mem128[ input_0 + 288 ] = x2
# asm 1: movdqu <x2=reg128#15,288(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,288(<input_0=%rdi)
movdqu %xmm14, 288(%rdi)

# qhasm: mem128[ input_0 + 304 ] = x3
# asm 1: movdqu <x3=reg128#11,304(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,304(<input_0=%rdi)
movdqu %xmm10, 304(%rdi)

# qhasm: mem128[ input_0 + 320 ] = x4
# asm 1: movdqu <x4=reg128#12,320(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,320(<input_0=%rdi)
movdqu %xmm11, 320(%rdi)

# qhasm: mem128[ input_0 + 336 ] = x5
# asm 1: movdqu <x5=reg128#9,336(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,336(<input_0=%rdi)
movdqu %xmm8, 336(%rdi)

# qhasm: mem128[ input_0 + 352 ] = x6
# asm 1: movdqu <x6=reg128#13,352(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,352(<input_0=%rdi)
movdqu %xmm12, 352(%rdi)

# qhasm: mem128[ input_0 + 368 ] = x7
# asm 1: movdqu <x7=reg128#7,368(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,368(<input_0=%rdi)
movdqu %xmm6, 368(%rdi)
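# Note: each 128-byte chunk above is loaded as eight 128-bit words, permuted
# by three masked shift-and-OR stages (strides 4, 2, then 1, with mask0..mask5
# held in %xmm0..%xmm5), and stored back -- the standard in-register
# bit-interleaving butterfly. The masks are presumably the 0x0F../0xF0..,
# 0x33../0xCC.. and 0x55../0xAA.. pairs defined in consts.S; that pairing is
# inferred from the shift distances, not stated in the qhasm source. The same
# pattern now repeats for the chunk at input_0 + 384 .. input_0 + 496.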
# qhasm: x0 = mem128[ input_0 + 384 ]
# asm 1: movdqu 384(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 384(<input_0=%rdi),>x0=%xmm6
movdqu 384(%rdi), %xmm6

# qhasm: x1 = mem128[ input_0 + 400 ]
# asm 1: movdqu 400(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 400(<input_0=%rdi),>x1=%xmm7
movdqu 400(%rdi), %xmm7

# qhasm: x2 = mem128[ input_0 + 416 ]
# asm 1: movdqu 416(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 416(<input_0=%rdi),>x2=%xmm8
movdqu 416(%rdi), %xmm8

# qhasm: x3 = mem128[ input_0 + 432 ]
# asm 1: movdqu 432(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 432(<input_0=%rdi),>x3=%xmm9
movdqu 432(%rdi), %xmm9

# qhasm: x4 = mem128[ input_0 + 448 ]
# asm 1: movdqu 448(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 448(<input_0=%rdi),>x4=%xmm10
movdqu 448(%rdi), %xmm10

# qhasm: x5 = mem128[ input_0 + 464 ]
# asm 1: movdqu 464(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 464(<input_0=%rdi),>x5=%xmm11
movdqu 464(%rdi), %xmm11

# qhasm: x6 = mem128[ input_0 + 480 ]
# asm 1: movdqu 480(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 480(<input_0=%rdi),>x6=%xmm12
movdqu 480(%rdi), %xmm12

# qhasm: x7 = mem128[ input_0 + 496 ]
# asm 1: movdqu 496(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 496(<input_0=%rdi),>x7=%xmm13
movdqu 496(%rdi), %xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 384 ] = x0
# asm 1: movdqu <x0=reg128#10,384(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,384(<input_0=%rdi)
movdqu %xmm9, 384(%rdi)

# qhasm: mem128[ input_0 + 400 ] = x1
# asm 1: movdqu <x1=reg128#14,400(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,400(<input_0=%rdi)
movdqu %xmm13, 400(%rdi)

# qhasm: mem128[ input_0 + 416 ] = x2
# asm 1: movdqu <x2=reg128#15,416(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,416(<input_0=%rdi)
movdqu %xmm14, 416(%rdi)

# qhasm: mem128[ input_0 + 432 ] = x3
# asm 1: movdqu <x3=reg128#11,432(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,432(<input_0=%rdi)
movdqu %xmm10, 432(%rdi)

# qhasm: mem128[ input_0 + 448 ] = x4
# asm 1: movdqu <x4=reg128#12,448(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,448(<input_0=%rdi)
movdqu %xmm11, 448(%rdi)

# qhasm: mem128[ input_0 + 464 ] = x5
# asm 1: movdqu <x5=reg128#9,464(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,464(<input_0=%rdi)
movdqu %xmm8, 464(%rdi)

# qhasm: mem128[ input_0 + 480 ] = x6
# asm 1: movdqu <x6=reg128#13,480(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,480(<input_0=%rdi)
movdqu %xmm12, 480(%rdi)

# qhasm: mem128[ input_0 + 496 ] = x7
# asm 1: movdqu <x7=reg128#7,496(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,496(<input_0=%rdi)
movdqu %xmm6, 496(%rdi)

# qhasm: x0 = mem128[ input_0 + 512 ]
# asm 1: movdqu 512(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 512(<input_0=%rdi),>x0=%xmm6
movdqu 512(%rdi), %xmm6

# qhasm: x1 = mem128[ input_0 + 528 ]
# asm 1: movdqu 528(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 528(<input_0=%rdi),>x1=%xmm7
movdqu 528(%rdi), %xmm7

# qhasm: x2 = mem128[ input_0 + 544 ]
# asm 1: movdqu 544(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 544(<input_0=%rdi),>x2=%xmm8
movdqu 544(%rdi), %xmm8

# qhasm: x3 = mem128[ input_0 + 560 ]
# asm 1: movdqu 560(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 560(<input_0=%rdi),>x3=%xmm9
movdqu 560(%rdi), %xmm9

# qhasm: x4 = mem128[ input_0 + 576 ]
# asm 1: movdqu 576(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 576(<input_0=%rdi),>x4=%xmm10
movdqu 576(%rdi), %xmm10

# qhasm: x5 = mem128[ input_0 + 592 ]
# asm 1: movdqu 592(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 592(<input_0=%rdi),>x5=%xmm11
movdqu 592(%rdi), %xmm11

# qhasm: x6 = mem128[ input_0 + 608 ]
# asm 1: movdqu 608(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 608(<input_0=%rdi),>x6=%xmm12
movdqu 608(%rdi), %xmm12

# qhasm: x7 = mem128[ input_0 + 624 ]
# asm 1: movdqu 624(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 624(<input_0=%rdi),>x7=%xmm13
movdqu 624(%rdi), %xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6
xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 512 ] = x0 # asm 1: movdqu <x0=reg128#10,512(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,512(<input_0=%rdi) movdqu % xmm9, 512( % rdi) # qhasm: mem128[ input_0 + 528 ] = x1 # asm 1: movdqu <x1=reg128#14,528(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,528(<input_0=%rdi) movdqu % xmm13, 528( % rdi) # qhasm: mem128[ input_0 + 544 ] = x2 # asm 1: movdqu <x2=reg128#15,544(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,544(<input_0=%rdi) movdqu % xmm14, 544( % rdi) # qhasm: mem128[ input_0 + 560 ] = x3 # asm 1: movdqu <x3=reg128#11,560(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,560(<input_0=%rdi) movdqu % xmm10, 560( % rdi) # qhasm: mem128[ input_0 + 576 ] = x4 # asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi) movdqu % xmm11, 576( % rdi) # qhasm: mem128[ input_0 + 592 ] = x5 # asm 1: movdqu <x5=reg128#9,592(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,592(<input_0=%rdi) movdqu % xmm8, 592( % rdi) # qhasm: mem128[ input_0 + 608 ] = x6 # asm 1: movdqu <x6=reg128#13,608(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,608(<input_0=%rdi) movdqu % xmm12, 608( % rdi) # qhasm: mem128[ input_0 + 624 ] = x7 # asm 1: movdqu <x7=reg128#7,624(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,624(<input_0=%rdi) movdqu % xmm6, 624( % rdi) # qhasm: x0 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 640(<input_0=%rdi),>x0=%xmm6 movdqu 640( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 656 ] # asm 1: movdqu 656(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 656(<input_0=%rdi),>x1=%xmm7 movdqu 656( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 672 ] # asm 1: movdqu 672(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 672(<input_0=%rdi),>x2=%xmm8 movdqu 672( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 688 ] # asm 1: movdqu 688(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 688(<input_0=%rdi),>x3=%xmm9 movdqu 688( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 704 ] # asm 1: movdqu 704(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 704(<input_0=%rdi),>x4=%xmm10 movdqu 704( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 720 ] # asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11 movdqu 720( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 736 ] # asm 1: movdqu 736(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 736(<input_0=%rdi),>x6=%xmm12 movdqu 736( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 752 ] # asm 1: movdqu 752(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 752(<input_0=%rdi),>x7=%xmm13 movdqu 752( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 
<<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % 
xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand 
<mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 
2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 640 ] = x0 # asm 1: movdqu <x0=reg128#10,640(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,640(<input_0=%rdi) movdqu % xmm9, 640( % rdi) # qhasm: mem128[ input_0 + 656 ] = x1 # asm 1: movdqu 
<x1=reg128#14,656(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,656(<input_0=%rdi) movdqu % xmm13, 656( % rdi) # qhasm: mem128[ input_0 + 672 ] = x2 # asm 1: movdqu <x2=reg128#15,672(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,672(<input_0=%rdi) movdqu % xmm14, 672( % rdi) # qhasm: mem128[ input_0 + 688 ] = x3 # asm 1: movdqu <x3=reg128#11,688(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,688(<input_0=%rdi) movdqu % xmm10, 688( % rdi) # qhasm: mem128[ input_0 + 704 ] = x4 # asm 1: movdqu <x4=reg128#12,704(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,704(<input_0=%rdi) movdqu % xmm11, 704( % rdi) # qhasm: mem128[ input_0 + 720 ] = x5 # asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi) movdqu % xmm8, 720( % rdi) # qhasm: mem128[ input_0 + 736 ] = x6 # asm 1: movdqu <x6=reg128#13,736(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,736(<input_0=%rdi) movdqu % xmm12, 736( % rdi) # qhasm: mem128[ input_0 + 752 ] = x7 # asm 1: movdqu <x7=reg128#7,752(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,752(<input_0=%rdi) movdqu % xmm6, 752( % rdi) # qhasm: x0 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 768(<input_0=%rdi),>x0=%xmm6 movdqu 768( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 784 ] # asm 1: movdqu 784(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 784(<input_0=%rdi),>x1=%xmm7 movdqu 784( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 800 ] # asm 1: movdqu 800(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 800(<input_0=%rdi),>x2=%xmm8 movdqu 800( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 816 ] # asm 1: movdqu 816(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 816(<input_0=%rdi),>x3=%xmm9 movdqu 816( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 832 ] # asm 1: movdqu 832(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 832(<input_0=%rdi),>x4=%xmm10 movdqu 832( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 848 ] # asm 1: movdqu 848(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 848(<input_0=%rdi),>x5=%xmm11 movdqu 848( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 864 ] # asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12 movdqu 864( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 880 ] # asm 1: movdqu 880(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 880(<input_0=%rdi),>x7=%xmm13 movdqu 880( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 
vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor 
<v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 
= v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 768 ] = x0 # asm 1: movdqu <x0=reg128#10,768(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,768(<input_0=%rdi) movdqu % xmm9, 768( % rdi) # qhasm: mem128[ input_0 + 784 ] = x1 # asm 1: movdqu <x1=reg128#14,784(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,784(<input_0=%rdi) movdqu % xmm13, 784( % rdi) # qhasm: mem128[ input_0 + 800 ] = x2 # asm 1: movdqu <x2=reg128#15,800(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,800(<input_0=%rdi) movdqu % xmm14, 800( % rdi) # qhasm: mem128[ input_0 + 816 ] = x3 # asm 1: movdqu <x3=reg128#11,816(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,816(<input_0=%rdi) movdqu % xmm10, 816( % rdi) # qhasm: mem128[ input_0 + 832 ] = x4 # asm 1: movdqu <x4=reg128#12,832(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,832(<input_0=%rdi) movdqu % xmm11, 832( % rdi) # qhasm: mem128[ input_0 + 848 ] = x5 # asm 1: movdqu <x5=reg128#9,848(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,848(<input_0=%rdi) movdqu % xmm8, 848( % rdi) # qhasm: mem128[ input_0 + 864 ] = x6 # asm 1: 
movdqu <x6=reg128#13,864(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi) movdqu % xmm12, 864( % rdi) # qhasm: mem128[ input_0 + 880 ] = x7 # asm 1: movdqu <x7=reg128#7,880(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,880(<input_0=%rdi) movdqu % xmm6, 880( % rdi) # qhasm: x0 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 896(<input_0=%rdi),>x0=%xmm6 movdqu 896( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 912 ] # asm 1: movdqu 912(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 912(<input_0=%rdi),>x1=%xmm7 movdqu 912( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 928 ] # asm 1: movdqu 928(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 928(<input_0=%rdi),>x2=%xmm8 movdqu 928( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 944 ] # asm 1: movdqu 944(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 944(<input_0=%rdi),>x3=%xmm9 movdqu 944( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 960 ] # asm 1: movdqu 960(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 960(<input_0=%rdi),>x4=%xmm10 movdqu 960( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 976 ] # asm 1: movdqu 976(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 976(<input_0=%rdi),>x5=%xmm11 movdqu 976( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 992 ] # asm 1: movdqu 992(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 992(<input_0=%rdi),>x6=%xmm12 movdqu 992( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 1008 ] # asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13 movdqu 1008( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x 
v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>x3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>x3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % 
xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<x3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<x7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % 
xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>x5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>x5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<x0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<x1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<x0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<x3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<x5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<x4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<x7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>x6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>x6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>x7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: mem128[ input_0 + 896 ] = x0 # asm 1: movdqu <x0=reg128#4,896(<input_0=int64#1) # asm 2: movdqu <x0=%xmm3,896(<input_0=%rdi) movdqu % xmm3, 896( % rdi) # qhasm: mem128[ input_0 + 912 ] = x1 # asm 1: movdqu <x1=reg128#8,912(<input_0=int64#1) # asm 2: movdqu <x1=%xmm7,912(<input_0=%rdi) movdqu % xmm7, 912( % rdi) # qhasm: mem128[ input_0 + 928 ] = x2 # asm 1: movdqu <x2=reg128#9,928(<input_0=int64#1) # asm 2: movdqu <x2=%xmm8,928(<input_0=%rdi) movdqu % xmm8, 928( % rdi) # qhasm: mem128[ input_0 + 944 ] = x3 # asm 1: movdqu <x3=reg128#1,944(<input_0=int64#1) # asm 2: movdqu <x3=%xmm0,944(<input_0=%rdi) movdqu % xmm0, 944( % rdi) # qhasm: mem128[ input_0 + 960 ] = x4 # asm 1: movdqu <x4=reg128#10,960(<input_0=int64#1) # asm 2: movdqu <x4=%xmm9,960(<input_0=%rdi) movdqu % xmm9, 960( % rdi) # qhasm: mem128[ input_0 + 976 ] = x5 # asm 1: movdqu <x5=reg128#3,976(<input_0=int64#1) # asm 2: movdqu <x5=%xmm2,976(<input_0=%rdi) movdqu % xmm2, 976( % rdi) # qhasm: mem128[ input_0 + 992 ] = x6 # asm 1: movdqu <x6=reg128#5,992(<input_0=int64#1) # asm 2: movdqu <x6=%xmm4,992(<input_0=%rdi) movdqu % xmm4, 992( % rdi) # qhasm: mem128[ input_0 + 1008 ] = x7 # asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1) # asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi) movdqu % xmm1, 1008( % rdi) # qhasm: return add % r11, % rsp ret
mktmansour/MKT-KSA-Geolocation-Security
69,549
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896f/avx2/vec256_mul_asm.S
#include "namespace.h" #define vec256_mul_asm CRYPTO_NAMESPACE(vec256_mul_asm) #define _vec256_mul_asm _CRYPTO_NAMESPACE(vec256_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_mul_asm .p2align 5 .global _vec256_mul_asm .global vec256_mul_asm _vec256_mul_asm: vec256_mul_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
# a8 = mem256[ input_1 + 256 ]
vmovupd 256(%rsi),%ymm14
# r8 ^= a8 & b0 ; r9..r20 ^= a8 & mem256[ input_2 + 32 .. 384 ]
vpand %ymm14,%ymm0,%ymm15
vpxor %ymm15,%ymm11,%ymm11
vpand 32(%rdx),%ymm14,%ymm15
vpxor %ymm15,%ymm12,%ymm12
vpand 64(%rdx),%ymm14,%ymm15
vpxor %ymm15,%ymm13,%ymm13
vpand 96(%rdx),%ymm14,%ymm15
vpxor %ymm15,%ymm1,%ymm1
vpand 128(%rdx),%ymm14,%ymm15
vpxor %ymm15,%ymm2,%ymm2
vpand 160(%rdx),%ymm14,%ymm15
vpxor %ymm15,%ymm3,%ymm3
vpand 192(%rdx),%ymm14,%ymm15
vpxor %ymm15,%ymm4,%ymm4
vpand 224(%rdx),%ymm14,%ymm15
vpxor %ymm15,%ymm5,%ymm5
vpand 256(%rdx),%ymm14,%ymm15
vpxor %ymm15,%ymm6,%ymm6
vpand 288(%rdx),%ymm14,%ymm15
vpxor %ymm15,%ymm7,%ymm7
vpand 320(%rdx),%ymm14,%ymm15
vpxor %ymm15,%ymm8,%ymm8
vpand 352(%rdx),%ymm14,%ymm15
vpxor %ymm15,%ymm9,%ymm9
vpand 384(%rdx),%ymm14,%ymm14
vpxor %ymm14,%ymm10,%ymm10
# reduce: r11 ^= r20 ; r10 ^= r20 ; r8 ^= r20 ; r7 = r20
vpxor %ymm10,%ymm1,%ymm1
vpxor %ymm10,%ymm13,%ymm13
vpxor %ymm10,%ymm11,%ymm11
vmovapd %ymm10,%ymm10
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
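# NOTE: the whole routine is straight-line vmovupd/vpand/vpxor with fixed
# addresses - no branches and no data-dependent memory accesses - so its
# timing does not depend on the (typically secret) field elements it
# multiplies.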
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
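# NOTE: the trailing "vmovapd %ymmN,%ymmN" of each reduction comes from
# renaming r(k+12) to r(k-1) at the qhasm level; both variables were
# allocated to the same physical register, so the move degenerates to a
# no-op that is nevertheless emitted as written.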
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
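# NOTE: the declarations at the top name a reg256 b1 that the generated code
# never uses; every b limb except b0 remains a memory operand.  Unused qhasm
# declarations are harmless, since they survive only as comments.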
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
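# NOTE: the fold above eliminated the last coefficient of degree >= 13, so
# the a0 products below accumulate directly into r0..r12.  b0 is consumed by
# the first vpand, which frees ymm0 to serve as the temporary for the rest
# of the block.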
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#3,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm2,384(<input_0=%rdi) vmovupd % ymm2, 384( % rdi) # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#2,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm1,352(<input_0=%rdi) vmovupd % ymm1, 352( % rdi) # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: 
vmovupd <r9=reg256#13,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm12,288(<input_0=%rdi) vmovupd % ymm12, 288( % rdi) # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#12,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm11,256(<input_0=%rdi) vmovupd % ymm11, 256( % rdi) # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#11,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm10,224(<input_0=%rdi) vmovupd % ymm10, 224( % rdi) # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#10,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm9,192(<input_0=%rdi) vmovupd % ymm9, 192( % rdi) # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#9,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm8,160(<input_0=%rdi) vmovupd % ymm8, 160( % rdi) # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#8,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm7,128(<input_0=%rdi) vmovupd % ymm7, 128( % rdi) # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#7,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm6,96(<input_0=%rdi) vmovupd % ymm6, 96( % rdi) # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#6,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm5,64(<input_0=%rdi) vmovupd % ymm5, 64( % rdi) # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#5,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm4,32(<input_0=%rdi) vmovupd % ymm4, 32( % rdi) # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#4,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm3,0(<input_0=%rdi) vmovupd % ymm3, 0( % rdi) # qhasm: return add % r11, % rsp ret
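The vpand/vpxor ladder that ends above has the shape of a bitsliced finite-field multiplication: each operand occupies 13 consecutive 256-bit limbs (offsets 0, 32, ..., 384 from input_1 and input_2), limb j holding bit j of 256 field elements in parallel, and the closing folds (r14 into r4/r2/r1, r13 into r4/r3/r1/r0) match reduction modulo x^13 + x^4 + x^3 + x + 1. A minimal C sketch under that reading follows; vec256, v_and, v_xor and vec256_mul_sketch are illustrative names, not the library's API, and the sketch reduces after the full product rather than interleaving the folds the way the assembly does.

#include <stdint.h>
#include <string.h>

typedef struct { uint64_t w[4]; } vec256;   /* stands in for one ymm register */

static vec256 v_and(vec256 a, vec256 b) {   /* vpand */
    vec256 r;
    for (int k = 0; k < 4; k++) r.w[k] = a.w[k] & b.w[k];
    return r;
}

static vec256 v_xor(vec256 a, vec256 b) {   /* vpxor */
    vec256 r;
    for (int k = 0; k < 4; k++) r.w[k] = a.w[k] ^ b.w[k];
    return r;
}

/* Illustrative only: h, f, g are 13 limbs each; limb j carries bit j of
   256 GF(2^13) elements processed in parallel. */
void vec256_mul_sketch(vec256 *h, const vec256 *f, const vec256 *g)
{
    vec256 buf[2 * 13 - 1];
    memset(buf, 0, sizeof buf);

    /* schoolbook carry-less product: the nested vpand/vpxor accumulation */
    for (int i = 0; i < 13; i++)
        for (int j = 0; j < 13; j++)
            buf[i + j] = v_xor(buf[i + j], v_and(f[i], g[j]));

    /* fold high limbs back using x^13 = x^4 + x^3 + x + 1, the same
       pattern as the r13/r14 reduction steps in the assembly */
    for (int i = 2 * 13 - 2; i >= 13; i--) {
        buf[i - 13 + 4] = v_xor(buf[i - 13 + 4], buf[i]);
        buf[i - 13 + 3] = v_xor(buf[i - 13 + 3], buf[i]);
        buf[i - 13 + 1] = v_xor(buf[i - 13 + 1], buf[i]);
        buf[i - 13]     = v_xor(buf[i - 13],     buf[i]);
    }
    memcpy(h, buf, 13 * sizeof(vec256));
}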
mktmansour/MKT-KSA-Geolocation-Security
264233
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896f/avx2/transpose_64x256_sp_asm.S
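The file content below is qhasm-generated AVX2 code that transposes a 64x256 bit matrix in place: it repeatedly loads eight 256-bit rows, applies mask/shift/OR "butterfly" swaps at block widths 32, 16 and 8 (the MASK5_*, MASK4_* and MASK3_* pairs loaded into mask0..mask5), and stores the rows back; the narrower widths are presumably handled via the MASK2_*/MASK1_*/MASK0_* constants later in the file. A minimal C sketch of the single-word 64x64 analogue of this butterfly, under that reading (transpose_64x64_sketch is an illustrative name, not the library's API):

#include <stdint.h>

/* Illustrative 64x64 bit-matrix transpose: r[i] is row i, and bit j of r[i]
   is matrix entry (i, j).  The assembly applies the same swap to each 64-bit
   lane of a ymm register, i.e. four such words at a time. */
void transpose_64x64_sketch(uint64_t r[64])
{
    /* low-half masks for block widths 1, 2, 4, 8, 16, 32 */
    static const uint64_t mask[6] = {
        0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
        0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL,
    };

    for (int d = 5; d >= 0; d--) {           /* widths 32, 16, 8, 4, 2, 1 */
        int s = 1 << d;
        for (int i = 0; i < 64; i += 2 * s)
            for (int j = i; j < i + s; j++) {
                uint64_t lo = r[j], hi = r[j + s];
                /* same shape as the asm's v00|v10 and v01|v11 */
                r[j]     = (lo & mask[d]) | ((hi & mask[d]) << s);
                r[j + s] = ((lo >> s) & mask[d]) | (hi & ~mask[d]);
            }
    }
}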
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x256_sp_asm CRYPTO_NAMESPACE(transpose_64x256_sp_asm) #define _transpose_64x256_sp_asm _CRYPTO_NAMESPACE(transpose_64x256_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 x0 # qhasm: reg256 x1 # qhasm: reg256 x2 # qhasm: reg256 x3 # qhasm: reg256 x4 # qhasm: reg256 x5 # qhasm: reg256 x6 # qhasm: reg256 x7 # qhasm: reg256 t0 # qhasm: reg256 t1 # qhasm: reg256 v00 # qhasm: reg256 v01 # qhasm: reg256 v10 # qhasm: reg256 v11 # qhasm: reg256 mask0 # qhasm: reg256 mask1 # qhasm: reg256 mask2 # qhasm: reg256 mask3 # qhasm: reg256 mask4 # qhasm: reg256 mask5 # qhasm: enter transpose_64x256_sp_asm .p2align 5 .global _transpose_64x256_sp_asm .global transpose_64x256_sp_asm _transpose_64x256_sp_asm: transpose_64x256_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem256[ MASK5_0 ] # asm 1: vmovapd MASK5_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK5_0(%rip),>mask0=%ymm0 vmovapd MASK5_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK5_1 ] # asm 1: vmovapd MASK5_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK5_1(%rip),>mask1=%ymm1 vmovapd MASK5_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK4_0 ] # asm 1: vmovapd MASK4_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK4_0(%rip),>mask2=%ymm2 vmovapd MASK4_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK4_1 ] # asm 1: vmovapd MASK4_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK4_1(%rip),>mask3=%ymm3 vmovapd MASK4_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK3_0 ] # asm 1: vmovapd MASK3_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK3_0(%rip),>mask4=%ymm4 vmovapd MASK3_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK3_1 ] # asm 1: vmovapd MASK3_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK3_1(%rip),>mask5=%ymm5 vmovapd MASK3_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 256(<input_0=%rdi),>x1=%ymm7 vmovupd 256( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 512 ] 
# asm 1: vmovupd 512(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 512(<input_0=%rdi),>x2=%ymm8 vmovupd 512( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 768 ] # asm 1: vmovupd 768(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 768(<input_0=%rdi),>x3=%ymm9 vmovupd 768( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1024(<input_0=%rdi),>x4=%ymm10 vmovupd 1024( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1280 ] # asm 1: vmovupd 1280(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1280(<input_0=%rdi),>x5=%ymm11 vmovupd 1280( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1536(<input_0=%rdi),>x6=%ymm12 vmovupd 1536( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1792(<input_0=%rdi),>x7=%ymm13 vmovupd 1792( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 
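#
# Added commentary (a reading of the mask/shift pattern, not generated
# output): each vpand / vpsl* / vpsr* / vpor group above is one butterfly
# step of the bit transpose.  For block width w in {32, 16, 8} and a
# register pair (x_lo, x_hi):
#
#     x_lo' = (x_lo & mask_lo) | (x_hi << w)
#     x_hi' = (x_lo >> w)      | (x_hi & mask_hi)
#
# where mask_lo / mask_hi select the low / high w bits of every 2w-bit
# block, i.e. the MASK5_*, MASK4_*, MASK3_* constants held in mask0..mask5.
#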
# qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 256 ] = x1 # asm 1: vmovupd <x1=reg256#14,256(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,256(<input_0=%rdi) vmovupd % ymm13, 256( % rdi) # qhasm: mem256[ input_0 + 512 ] = x2 # asm 1: vmovupd <x2=reg256#15,512(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,512(<input_0=%rdi) vmovupd % ymm14, 512( % rdi) # qhasm: mem256[ input_0 + 768 ] = x3 # asm 1: vmovupd <x3=reg256#11,768(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,768(<input_0=%rdi) vmovupd % ymm10, 768( % rdi) # qhasm: mem256[ input_0 + 1024 ] = x4 # asm 1: vmovupd <x4=reg256#12,1024(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1024(<input_0=%rdi) vmovupd % ymm11, 1024( % rdi) # qhasm: mem256[ input_0 + 1280 ] = x5 # asm 1: vmovupd <x5=reg256#9,1280(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1280(<input_0=%rdi) vmovupd % ymm8, 1280( % rdi) # qhasm: mem256[ input_0 + 1536 ] = x6 # asm 1: vmovupd <x6=reg256#13,1536(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1536(<input_0=%rdi) vmovupd % ymm12, 1536( % rdi) # qhasm: mem256[ input_0 + 1792 ] = x7 # asm 1: vmovupd <x7=reg256#7,1792(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1792(<input_0=%rdi) vmovupd % ymm6, 1792( % rdi) # qhasm: x0 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6 vmovupd 32( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 544 ] # asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8 vmovupd 544( % rdi), % ymm8 # qhasm: x3 = mem256[ 
input_0 + 800 ] # asm 1: vmovupd 800(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 800(<input_0=%rdi),>x3=%ymm9 vmovupd 800( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1056 ] # asm 1: vmovupd 1056(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1056(<input_0=%rdi),>x4=%ymm10 vmovupd 1056( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1312 ] # asm 1: vmovupd 1312(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1312(<input_0=%rdi),>x5=%ymm11 vmovupd 1312( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1568(<input_0=%rdi),>x6=%ymm12 vmovupd 1568( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1824(<input_0=%rdi),>x7=%ymm13 vmovupd 1824( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: 
vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 
2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x 
v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 32 ] = x0 # asm 1: vmovupd <x0=reg256#10,32(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,32(<input_0=%rdi) vmovupd % ymm9, 32( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 544 ] = x2 # asm 1: vmovupd <x2=reg256#15,544(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,544(<input_0=%rdi) vmovupd % ymm14, 544( % rdi) # qhasm: mem256[ input_0 + 800 ] = x3 # asm 1: vmovupd <x3=reg256#11,800(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,800(<input_0=%rdi) vmovupd % ymm10, 800( % rdi) # qhasm: mem256[ input_0 + 1056 ] = x4 # asm 1: vmovupd <x4=reg256#12,1056(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1056(<input_0=%rdi) vmovupd % ymm11, 1056( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x5 # asm 1: vmovupd <x5=reg256#9,1312(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1312(<input_0=%rdi) vmovupd % ymm8, 1312( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x6 # asm 1: vmovupd <x6=reg256#13,1568(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1568(<input_0=%rdi) vmovupd % ymm12, 1568( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x7 # asm 1: vmovupd <x7=reg256#7,1824(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1824(<input_0=%rdi) vmovupd % ymm6, 1824( % rdi) # qhasm: x0 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 64(<input_0=%rdi),>x0=%ymm6 vmovupd 64( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 320(<input_0=%rdi),>x1=%ymm7 vmovupd 320( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 832 ] # asm 1: vmovupd 832(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 832(<input_0=%rdi),>x3=%ymm9 vmovupd 832( % rdi), % ymm9 # qhasm: 
x4 = mem256[ input_0 + 1088 ] # asm 1: vmovupd 1088(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1088(<input_0=%rdi),>x4=%ymm10 vmovupd 1088( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1344 ] # asm 1: vmovupd 1344(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1344(<input_0=%rdi),>x5=%ymm11 vmovupd 1344( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1600(<input_0=%rdi),>x6=%ymm12 vmovupd 1600( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1856(<input_0=%rdi),>x7=%ymm13 vmovupd 1856( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 
1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = 
x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 64 ] = x0 # asm 1: vmovupd <x0=reg256#10,64(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,64(<input_0=%rdi) vmovupd % ymm9, 64( % rdi) # qhasm: mem256[ input_0 + 320 ] = x1 # asm 1: vmovupd <x1=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 576 ] = x2 # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi) vmovupd % ymm14, 576( % rdi) # qhasm: mem256[ input_0 + 832 ] = x3 # asm 1: vmovupd <x3=reg256#11,832(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,832(<input_0=%rdi) vmovupd % ymm10, 832( % rdi) # qhasm: mem256[ input_0 + 1088 ] = x4 # asm 1: vmovupd <x4=reg256#12,1088(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1088(<input_0=%rdi) vmovupd % ymm11, 1088( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x5 # asm 1: vmovupd <x5=reg256#9,1344(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1344(<input_0=%rdi) vmovupd % ymm8, 1344( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x6 # asm 1: vmovupd <x6=reg256#13,1600(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1600(<input_0=%rdi) vmovupd % ymm12, 1600( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x7 # asm 1: vmovupd <x7=reg256#7,1856(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1856(<input_0=%rdi) vmovupd % ymm6, 1856( % rdi) # qhasm: x0 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6 vmovupd 96( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7 vmovupd 352( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8 vmovupd 608( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 864 ] # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9 vmovupd 864( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1120 ] # asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10 vmovupd 1120( % rdi), % 
ymm10 # qhasm: x5 = mem256[ input_0 + 1376 ] # asm 1: vmovupd 1376(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1376(<input_0=%rdi),>x5=%ymm11 vmovupd 1376( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1632 ] # asm 1: vmovupd 1632(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1632(<input_0=%rdi),>x6=%ymm12 vmovupd 1632( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1888(<input_0=%rdi),>x7=%ymm13 vmovupd 1888( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 
<< 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 
vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 96 ] = x0 # asm 1: vmovupd <x0=reg256#10,96(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,96(<input_0=%rdi) vmovupd % ymm9, 96( % rdi) # qhasm: mem256[ input_0 + 352 ] = x1 # asm 1: vmovupd <x1=reg256#14,352(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,352(<input_0=%rdi) vmovupd % ymm13, 352( % rdi) # qhasm: mem256[ input_0 + 608 ] = x2 # asm 1: vmovupd <x2=reg256#15,608(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,608(<input_0=%rdi) vmovupd % ymm14, 608( % rdi) # qhasm: mem256[ input_0 + 864 ] = x3 # asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi) vmovupd % ymm10, 864( % rdi) # qhasm: mem256[ input_0 + 1120 ] = x4 # asm 1: vmovupd <x4=reg256#12,1120(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1120(<input_0=%rdi) vmovupd % ymm11, 1120( % rdi) # qhasm: mem256[ input_0 + 1376 ] = x5 # asm 1: vmovupd <x5=reg256#9,1376(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1376(<input_0=%rdi) vmovupd % ymm8, 1376( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x6 # asm 1: vmovupd <x6=reg256#13,1632(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1632(<input_0=%rdi) vmovupd % ymm12, 1632( % rdi) # qhasm: mem256[ input_0 + 1888 ] = x7 # asm 1: vmovupd <x7=reg256#7,1888(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1888(<input_0=%rdi) vmovupd % ymm6, 1888( % rdi) # qhasm: x0 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 128(<input_0=%rdi),>x0=%ymm6 vmovupd 128( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 384(<input_0=%rdi),>x1=%ymm7 vmovupd 384( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 640 ] # asm 1: vmovupd 640(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 640(<input_0=%rdi),>x2=%ymm8 vmovupd 640( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 896 ] # asm 1: vmovupd 896(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 896(<input_0=%rdi),>x3=%ymm9 vmovupd 896( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1152 ] # asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10 vmovupd 1152( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1408 ] # asm 1: vmovupd 1408(<input_0=int64#1),>x5=reg256#12 # asm 2: 
vmovupd 1408(<input_0=%rdi),>x5=%ymm11 vmovupd 1408( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1664 ] # asm 1: vmovupd 1664(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1664(<input_0=%rdi),>x6=%ymm12 vmovupd 1664( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1920(<input_0=%rdi),>x7=%ymm13 vmovupd 1920( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, 
% ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 
1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor 
<v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 128 ] = x0 # asm 1: vmovupd <x0=reg256#10,128(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,128(<input_0=%rdi) vmovupd % ymm9, 128( % rdi) # qhasm: mem256[ input_0 + 384 ] = x1 # asm 1: vmovupd <x1=reg256#14,384(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,384(<input_0=%rdi) vmovupd % ymm13, 384( % rdi) # qhasm: mem256[ input_0 + 640 ] = x2 # asm 1: vmovupd <x2=reg256#15,640(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,640(<input_0=%rdi) vmovupd % ymm14, 640( % rdi) # qhasm: mem256[ input_0 + 896 ] = x3 # asm 1: vmovupd <x3=reg256#11,896(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,896(<input_0=%rdi) vmovupd % ymm10, 896( % rdi) # qhasm: mem256[ input_0 + 1152 ] = x4 # asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi) vmovupd % ymm11, 1152( % rdi) # qhasm: mem256[ input_0 + 1408 ] = x5 # asm 1: vmovupd <x5=reg256#9,1408(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1408(<input_0=%rdi) vmovupd % ymm8, 1408( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x6 # asm 1: vmovupd <x6=reg256#13,1664(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1664(<input_0=%rdi) vmovupd % ymm12, 1664( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x7 # asm 1: vmovupd <x7=reg256#7,1920(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1920(<input_0=%rdi) vmovupd % ymm6, 1920( % rdi) # qhasm: x0 = mem256[ input_0 + 160 ] # asm 1: vmovupd 160(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 160(<input_0=%rdi),>x0=%ymm6 vmovupd 160( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 416 ] # asm 1: vmovupd 416(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 416(<input_0=%rdi),>x1=%ymm7 vmovupd 416( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 672 ] # asm 1: vmovupd 672(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 672(<input_0=%rdi),>x2=%ymm8 vmovupd 672( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 928 ] # asm 1: vmovupd 928(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 928(<input_0=%rdi),>x3=%ymm9 vmovupd 928( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1184 ] # asm 1: vmovupd 1184(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1184(<input_0=%rdi),>x4=%ymm10 vmovupd 1184( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1440 ] # asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11 vmovupd 1440( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1696 ] # asm 1: vmovupd 
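#
# ----------------------------------------------------------------------
# NOTE (explanatory comment, not emitted by qhasm): the same three
# butterfly stages (32-, 16- and 8-bit) repeat verbatim for each
# 32-byte column of the 2 KiB region touched here.  Within one
# iteration the eight rows x0..x7 sit 256 bytes apart; the column
# offset then advances by 32 (96, 128, 160, 192, 224 in this excerpt;
# lower columns were processed before it).  Each group of eight rows
# is stored back to exactly the offsets it was loaded from.
# ----------------------------------------------------------------------
#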
1696(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1696(<input_0=%rdi),>x6=%ymm12 vmovupd 1696( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1952(<input_0=%rdi),>x7=%ymm13 vmovupd 1952( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq 
$32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % 
ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor 
<v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 160 ] = x0 # asm 1: vmovupd <x0=reg256#10,160(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,160(<input_0=%rdi) vmovupd % ymm9, 160( % rdi) # qhasm: mem256[ input_0 + 416 ] = x1 # asm 1: vmovupd <x1=reg256#14,416(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,416(<input_0=%rdi) vmovupd % ymm13, 416( % rdi) # qhasm: mem256[ input_0 + 672 ] = x2 # asm 1: vmovupd <x2=reg256#15,672(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,672(<input_0=%rdi) vmovupd % ymm14, 672( % rdi) # qhasm: mem256[ input_0 + 928 ] = x3 # asm 1: vmovupd <x3=reg256#11,928(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,928(<input_0=%rdi) vmovupd % ymm10, 928( % rdi) # qhasm: mem256[ input_0 + 1184 ] = x4 # asm 1: vmovupd <x4=reg256#12,1184(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1184(<input_0=%rdi) vmovupd % ymm11, 1184( % rdi) # qhasm: mem256[ input_0 + 1440 ] = x5 # asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi) vmovupd % ymm8, 1440( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x6 # asm 1: vmovupd <x6=reg256#13,1696(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1696(<input_0=%rdi) vmovupd % ymm12, 1696( % rdi) # qhasm: mem256[ input_0 + 1952 ] = x7 # asm 1: vmovupd <x7=reg256#7,1952(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1952(<input_0=%rdi) vmovupd % ymm6, 1952( % rdi) # qhasm: x0 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 192(<input_0=%rdi),>x0=%ymm6 vmovupd 192( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 448 ] # asm 1: vmovupd 448(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 448(<input_0=%rdi),>x1=%ymm7 vmovupd 448( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 704 ] # asm 1: vmovupd 704(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 704(<input_0=%rdi),>x2=%ymm8 vmovupd 704( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 960 ] # asm 1: vmovupd 960(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 960(<input_0=%rdi),>x3=%ymm9 vmovupd 960( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1216 ] # asm 1: vmovupd 1216(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1216(<input_0=%rdi),>x4=%ymm10 vmovupd 1216( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1472 ] # asm 1: vmovupd 1472(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1472(<input_0=%rdi),>x5=%ymm11 vmovupd 1472( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1728 ] # asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12 vmovupd 1728( % rdi), 
% ymm12 # qhasm: x7 = mem256[ input_0 + 1984 ] # asm 1: vmovupd 1984(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1984(<input_0=%rdi),>x7=%ymm13 vmovupd 1984( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand 
<x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # 
asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: 
v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 192 ] = x0 # asm 1: vmovupd <x0=reg256#10,192(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,192(<input_0=%rdi) vmovupd % ymm9, 192( % rdi) # qhasm: mem256[ input_0 + 448 ] = x1 # asm 1: vmovupd <x1=reg256#14,448(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,448(<input_0=%rdi) vmovupd % ymm13, 448( % rdi) # qhasm: mem256[ input_0 + 704 ] = x2 # asm 1: vmovupd <x2=reg256#15,704(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,704(<input_0=%rdi) vmovupd % ymm14, 704( % rdi) # qhasm: mem256[ input_0 + 960 ] = x3 # asm 1: vmovupd <x3=reg256#11,960(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,960(<input_0=%rdi) vmovupd % ymm10, 960( % rdi) # qhasm: mem256[ input_0 + 1216 ] = x4 # asm 1: vmovupd <x4=reg256#12,1216(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1216(<input_0=%rdi) vmovupd % ymm11, 1216( % rdi) # qhasm: mem256[ input_0 + 1472 ] = x5 # asm 1: vmovupd <x5=reg256#9,1472(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1472(<input_0=%rdi) vmovupd % ymm8, 1472( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6 # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi) vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1984 ] = x7 # asm 1: vmovupd <x7=reg256#7,1984(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1984(<input_0=%rdi) vmovupd % ymm6, 1984( % rdi) # qhasm: x0 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 224(<input_0=%rdi),>x0=%ymm6 vmovupd 224( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 480 ] # asm 1: vmovupd 480(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 480(<input_0=%rdi),>x1=%ymm7 vmovupd 480( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 736 ] # asm 1: vmovupd 736(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 736(<input_0=%rdi),>x2=%ymm8 vmovupd 736( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 992 ] # asm 1: vmovupd 992(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 992(<input_0=%rdi),>x3=%ymm9 vmovupd 992( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1248 ] # asm 1: vmovupd 1248(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1248(<input_0=%rdi),>x4=%ymm10 vmovupd 1248( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1504 ] # asm 1: vmovupd 1504(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1504(<input_0=%rdi),>x5=%ymm11 vmovupd 1504( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1760 ] # asm 1: vmovupd 1760(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1760(<input_0=%rdi),>x6=%ymm12 vmovupd 1760( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 2016 ] # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 
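#
# ----------------------------------------------------------------------
# NOTE (explanatory comment, not emitted by qhasm): in the column-224
# iteration now being loaded, qhasm's register allocator starts
# recycling ymm0..ymm5, which so far held mask0..mask5, as scratch
# registers; each mask register is overwritten by the very operation
# that consumes it last, since all six masks are reloaded with new
# constants once this iteration's stores complete.
# ----------------------------------------------------------------------
#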
2016(<input_0=%rdi),>x7=%ymm13 vmovupd 2016( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#1 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm0 vpand % ymm9, % ymm0, % ymm0 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#13 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm12 vpsllq $32, % ymm13, % ymm12 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: x3 = v00 
| v10 # asm 1: vpor <v00=reg256#1,<v10=reg256#13,>x3=reg256#1 # asm 2: vpor <v00=%ymm0,<v10=%ymm12,>x3=%ymm0 vpor % ymm0, % ymm12, % ymm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1 vpor % ymm9, % ymm1, % ymm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9 vpand % ymm14, % ymm2, % ymm9 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#13 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm12 vpslld $16, % ymm11, % ymm12 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#14 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm13 vpsrld $16, % ymm14, % ymm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9 vpor % ymm9, % ymm12, % ymm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11 vpor % ymm13, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12 vpand % ymm10, % ymm2, % ymm12 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#1,>v10=reg256#14 # asm 2: vpslld $16,<x3=%ymm0,>v10=%ymm13 vpslld $16, % ymm0, % ymm13 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12 vpor % ymm12, % ymm13, % ymm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0 vpor % ymm10, % ymm0, % ymm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10 vpand % ymm6, % ymm2, % ymm10 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#14 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm13 vpslld $16, % ymm8, % ymm13 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10 vpor % ymm10, % ymm13, % ymm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#3 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm2 vpand % ymm7, % ymm2, % ymm2 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#2,>v10=reg256#9 # asm 2: vpslld $16,<x7=%ymm1,>v10=%ymm8 vpslld $16, % ymm1, % ymm8 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld 
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16, %ymm7, %ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1
vpand %ymm1, %ymm3, %ymm1

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#3,<v10=reg256#9,>x5=reg256#3
# asm 2: vpor <v00=%ymm2,<v10=%ymm8,>x5=%ymm2
vpor %ymm2, %ymm8, %ymm2

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1
vpor %ymm7, %ymm1, %ymm1

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4
# asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3
vpand %ymm9, %ymm4, %ymm3

# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#13,>v10=reg256#8
# asm 2: vpsllw $8,<x1=%ymm12,>v10=%ymm7
vpsllw $8, %ymm12, %ymm7

# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#10,>v01=reg256#9
# asm 2: vpsrlw $8,<x0=%ymm9,>v01=%ymm8
vpsrlw $8, %ymm9, %ymm8

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10
# asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9
vpand %ymm12, %ymm5, %ymm9

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4
# asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3
vpor %ymm3, %ymm7, %ymm3

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8
# asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7
vpor %ymm8, %ymm9, %ymm7

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8
vpand %ymm11, %ymm4, %ymm8

# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#1,>v10=reg256#10
# asm 2: vpsllw $8,<x3=%ymm0,>v10=%ymm9
vpsllw $8, %ymm0, %ymm9

# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8, %ymm11, %ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0
vpand %ymm0, %ymm5, %ymm0

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8
vpor %ymm8, %ymm9, %ymm8

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0
vpor %ymm11, %ymm0, %ymm0

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9
vpand %ymm10, %ymm4, %ymm9

# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#3,>v10=reg256#12
# asm 2: vpsllw $8,<x5=%ymm2,>v10=%ymm11
vpsllw $8, %ymm2, %ymm11

# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#11,>v01=reg256#11
# asm 2: vpsrlw $8,<x4=%ymm10,>v01=%ymm10
vpsrlw $8, %ymm10, %ymm10

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3
# asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2
vpand %ymm2, %ymm5, %ymm2

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9
vpor %ymm9, %ymm11, %ymm9

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3
# asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2
vpor %ymm10, %ymm2, %ymm2

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#5
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm4
vpand %ymm6, %ymm4, %ymm4

# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#2,>v10=reg256#11
# asm 2: vpsllw $8,<x7=%ymm1,>v10=%ymm10
vpsllw $8, %ymm1, %ymm10

# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8, %ymm6, %ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1
vpand %ymm1, %ymm5, %ymm1

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#5,<v10=reg256#11,>x6=reg256#5
# asm 2: vpor <v00=%ymm4,<v10=%ymm10,>x6=%ymm4
vpor %ymm4, %ymm10, %ymm4

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1
vpor %ymm6, %ymm1, %ymm1

# qhasm: mem256[ input_0 + 224 ] = x0
# asm 1: vmovupd <x0=reg256#4,224(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm3,224(<input_0=%rdi)
vmovupd %ymm3, 224(%rdi)

# qhasm: mem256[ input_0 + 480 ] = x1
# asm 1: vmovupd <x1=reg256#8,480(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm7,480(<input_0=%rdi)
vmovupd %ymm7, 480(%rdi)

# qhasm: mem256[ input_0 + 736 ] = x2
# asm 1: vmovupd <x2=reg256#9,736(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm8,736(<input_0=%rdi)
vmovupd %ymm8, 736(%rdi)

# qhasm: mem256[ input_0 + 992 ] = x3
# asm 1: vmovupd <x3=reg256#1,992(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm0,992(<input_0=%rdi)
vmovupd %ymm0, 992(%rdi)

# qhasm: mem256[ input_0 + 1248 ] = x4
# asm 1: vmovupd <x4=reg256#10,1248(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm9,1248(<input_0=%rdi)
vmovupd %ymm9, 1248(%rdi)

# qhasm: mem256[ input_0 + 1504 ] = x5
# asm 1: vmovupd <x5=reg256#3,1504(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm2,1504(<input_0=%rdi)
vmovupd %ymm2, 1504(%rdi)

# qhasm: mem256[ input_0 + 1760 ] = x6
# asm 1: vmovupd <x6=reg256#5,1760(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm4,1760(<input_0=%rdi)
vmovupd %ymm4, 1760(%rdi)

# qhasm: mem256[ input_0 + 2016 ] = x7
# asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi)
vmovupd %ymm1, 2016(%rdi)
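# ---------------------------------------------------------------------
# Annotation (added; not qhasm output): the eight stores above, 256
# bytes apart, finish the byte-granularity interleaving pass. The code
# below reloads the mask pairs (MASK2_*, MASK1_*, MASK0_* from
# consts.S) and applies three further masked shift/OR "butterfly"
# levels -- 4-bit, 2-bit and 1-bit -- to each group of eight 256-bit
# rows. In scalar terms, one level with shift s and mask pair (m0, m1)
# recombines a row pair (x, y) as:
#
#     new_x = (x & m0) | ((y & m0) << s)
#     new_y = ((x & m1) >> s) | (y & m1)
#
# where m0 selects the low s bits and m1 the high s bits of each
# 2s-bit cell. Only vpand/vpsllq/vpsrlq/vpor are used, so the
# instruction and memory trace is independent of the data.
# ---------------------------------------------------------------------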
# qhasm: mask0 aligned= mem256[ MASK2_0 ]
# asm 1: vmovapd MASK2_0(%rip),>mask0=reg256#1
# asm 2: vmovapd MASK2_0(%rip),>mask0=%ymm0
vmovapd MASK2_0(%rip), %ymm0

# qhasm: mask1 aligned= mem256[ MASK2_1 ]
# asm 1: vmovapd MASK2_1(%rip),>mask1=reg256#2
# asm 2: vmovapd MASK2_1(%rip),>mask1=%ymm1
vmovapd MASK2_1(%rip), %ymm1

# qhasm: mask2 aligned= mem256[ MASK1_0 ]
# asm 1: vmovapd MASK1_0(%rip),>mask2=reg256#3
# asm 2: vmovapd MASK1_0(%rip),>mask2=%ymm2
vmovapd MASK1_0(%rip), %ymm2

# qhasm: mask3 aligned= mem256[ MASK1_1 ]
# asm 1: vmovapd MASK1_1(%rip),>mask3=reg256#4
# asm 2: vmovapd MASK1_1(%rip),>mask3=%ymm3
vmovapd MASK1_1(%rip), %ymm3

# qhasm: mask4 aligned= mem256[ MASK0_0 ]
# asm 1: vmovapd MASK0_0(%rip),>mask4=reg256#5
# asm 2: vmovapd MASK0_0(%rip),>mask4=%ymm4
vmovapd MASK0_0(%rip), %ymm4

# qhasm: mask5 aligned= mem256[ MASK0_1 ]
# asm 1: vmovapd MASK0_1(%rip),>mask5=reg256#6
# asm 2: vmovapd MASK0_1(%rip),>mask5=%ymm5
vmovapd MASK0_1(%rip), %ymm5

# qhasm: x0 = mem256[ input_0 + 0 ]
# asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6
vmovupd 0(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 32 ]
# asm 1: vmovupd 32(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 32(<input_0=%rdi),>x1=%ymm7
vmovupd 32(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 64 ]
# asm 1: vmovupd 64(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 64(<input_0=%rdi),>x2=%ymm8
vmovupd 64(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 96 ]
# asm 1: vmovupd 96(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 96(<input_0=%rdi),>x3=%ymm9
vmovupd 96(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 128 ]
# asm 1: vmovupd 128(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 128(<input_0=%rdi),>x4=%ymm10
vmovupd 128(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 160 ]
# asm 1: vmovupd 160(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 160(<input_0=%rdi),>x5=%ymm11
vmovupd 160(%rdi), %ymm11

# qhasm: x6 = mem256[ input_0 + 192 ]
# asm 1: vmovupd 192(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 192(<input_0=%rdi),>x6=%ymm12
vmovupd 192(%rdi), %ymm12

# qhasm: x7 = mem256[ input_0 + 224 ]
# asm 1: vmovupd 224(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 224(<input_0=%rdi),>x7=%ymm13
vmovupd 224(%rdi), %ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6

# qhasm: mem256[ input_0 + 0 ] = x0
# asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi)
vmovupd %ymm9, 0(%rdi)

# qhasm: mem256[ input_0 + 32 ] = x1
# asm 1: vmovupd <x1=reg256#14,32(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,32(<input_0=%rdi)
vmovupd %ymm13, 32(%rdi)

# qhasm: mem256[ input_0 + 64 ] = x2
# asm 1: vmovupd <x2=reg256#15,64(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,64(<input_0=%rdi)
vmovupd %ymm14, 64(%rdi)

# qhasm: mem256[ input_0 + 96 ] = x3
# asm 1: vmovupd <x3=reg256#11,96(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,96(<input_0=%rdi)
vmovupd %ymm10, 96(%rdi)

# qhasm: mem256[ input_0 + 128 ] = x4
# asm 1: vmovupd <x4=reg256#12,128(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,128(<input_0=%rdi)
vmovupd %ymm11, 128(%rdi)

# qhasm: mem256[ input_0 + 160 ] = x5
# asm 1: vmovupd <x5=reg256#9,160(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,160(<input_0=%rdi)
vmovupd %ymm8, 160(%rdi)

# qhasm: mem256[ input_0 + 192 ] = x6
# asm 1: vmovupd <x6=reg256#13,192(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,192(<input_0=%rdi)
vmovupd %ymm12, 192(%rdi)

# qhasm: mem256[ input_0 + 224 ] = x7
# asm 1: vmovupd <x7=reg256#7,224(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,224(<input_0=%rdi)
vmovupd %ymm6, 224(%rdi)
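# The three butterfly levels above transformed the first eight rows
# (bytes 0..255) in place; the identical load/butterfly/store sequence
# now repeats for the second group of eight rows, at byte offsets
# 256..480.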
# qhasm: x0 = mem256[ input_0 + 256 ]
# asm 1: vmovupd 256(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 256(<input_0=%rdi),>x0=%ymm6
vmovupd 256(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 288 ]
# asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7
vmovupd 288(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 320 ]
# asm 1: vmovupd 320(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 320(<input_0=%rdi),>x2=%ymm8
vmovupd 320(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 352 ]
# asm 1: vmovupd 352(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 352(<input_0=%rdi),>x3=%ymm9
vmovupd 352(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 384 ]
# asm 1: vmovupd 384(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 384(<input_0=%rdi),>x4=%ymm10
vmovupd 384(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 416 ]
# asm 1: vmovupd 416(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 416(<input_0=%rdi),>x5=%ymm11
vmovupd 416(%rdi), %ymm11

# qhasm: x6 = mem256[ input_0 + 448 ]
# asm 1: vmovupd 448(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 448(<input_0=%rdi),>x6=%ymm12
vmovupd 448(%rdi), %ymm12

# qhasm: x7 = mem256[ input_0 + 480 ]
# asm 1: vmovupd 480(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 480(<input_0=%rdi),>x7=%ymm13
vmovupd 480(%rdi), %ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6

# qhasm: mem256[ input_0 + 256 ] = x0
# asm 1: vmovupd <x0=reg256#10,256(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,256(<input_0=%rdi)
vmovupd %ymm9, 256(%rdi)

# qhasm: mem256[ input_0 + 288 ] = x1
# asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi)
vmovupd %ymm13, 288(%rdi)

# qhasm: mem256[ input_0 + 320 ] = x2
# asm 1: vmovupd <x2=reg256#15,320(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,320(<input_0=%rdi)
vmovupd %ymm14, 320(%rdi)

# qhasm: mem256[ input_0 + 352 ] = x3
# asm 1: vmovupd <x3=reg256#11,352(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,352(<input_0=%rdi)
vmovupd %ymm10, 352(%rdi)

# qhasm: mem256[ input_0 + 384 ] = x4
# asm 1: vmovupd <x4=reg256#12,384(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,384(<input_0=%rdi)
vmovupd %ymm11, 384(%rdi)

# qhasm: mem256[ input_0 + 416 ] = x5
# asm 1: vmovupd <x5=reg256#9,416(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,416(<input_0=%rdi)
vmovupd %ymm8, 416(%rdi)

# qhasm: mem256[ input_0 + 448 ] = x6
# asm 1: vmovupd <x6=reg256#13,448(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,448(<input_0=%rdi)
vmovupd %ymm12, 448(%rdi)

# qhasm: mem256[ input_0 + 480 ] = x7
# asm 1: vmovupd <x7=reg256#7,480(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,480(<input_0=%rdi)
vmovupd %ymm6, 480(%rdi)
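# Third group of eight rows, at byte offsets 512..736, processed by the
# same 4-/2-/1-bit butterfly levels.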
# qhasm: x0 = mem256[ input_0 + 512 ]
# asm 1: vmovupd 512(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 512(<input_0=%rdi),>x0=%ymm6
vmovupd 512(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 544 ]
# asm 1: vmovupd 544(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 544(<input_0=%rdi),>x1=%ymm7
vmovupd 544(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 576 ]
# asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8
vmovupd 576(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 608 ]
# asm 1: vmovupd 608(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 608(<input_0=%rdi),>x3=%ymm9
vmovupd 608(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 640 ]
# asm 1: vmovupd 640(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 640(<input_0=%rdi),>x4=%ymm10
vmovupd 640(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 672 ]
# asm 1: vmovupd 672(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 672(<input_0=%rdi),>x5=%ymm11
vmovupd 672(%rdi), %ymm11

# qhasm: x6 = mem256[ input_0 + 704 ]
# asm 1: vmovupd 704(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 704(<input_0=%rdi),>x6=%ymm12
vmovupd 704(%rdi), %ymm12

# qhasm: x7 = mem256[ input_0 + 736 ]
# asm 1: vmovupd 736(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 736(<input_0=%rdi),>x7=%ymm13
vmovupd 736(%rdi), %ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6

# qhasm: mem256[ input_0 + 512 ] = x0
# asm 1: vmovupd <x0=reg256#10,512(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,512(<input_0=%rdi)
vmovupd %ymm9, 512(%rdi)

# qhasm: mem256[ input_0 + 544 ] = x1
# asm 1: vmovupd <x1=reg256#14,544(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,544(<input_0=%rdi)
vmovupd %ymm13, 544(%rdi)

# qhasm: mem256[ input_0 + 576 ] = x2
# asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi)
vmovupd %ymm14, 576(%rdi)

# qhasm: mem256[ input_0 + 608 ] = x3
# asm 1: vmovupd <x3=reg256#11,608(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,608(<input_0=%rdi)
vmovupd %ymm10, 608(%rdi)

# qhasm: mem256[ input_0 + 640 ] = x4
# asm 1: vmovupd <x4=reg256#12,640(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,640(<input_0=%rdi)
vmovupd %ymm11, 640(%rdi)

# qhasm: mem256[ input_0 + 672 ] = x5
# asm 1: vmovupd <x5=reg256#9,672(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,672(<input_0=%rdi)
vmovupd %ymm8, 672(%rdi)

# qhasm: mem256[ input_0 + 704 ] = x6
# asm 1: vmovupd <x6=reg256#13,704(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,704(<input_0=%rdi)
vmovupd %ymm12, 704(%rdi)

# qhasm: mem256[ input_0 + 736 ] = x7
# asm 1: vmovupd <x7=reg256#7,736(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,736(<input_0=%rdi)
vmovupd %ymm6, 736(%rdi)
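# Fourth group of eight rows, at byte offsets 768..992: the 4-bit level
# is applied in full below and the 2-bit level begins; the sequence
# continues in the same pattern.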
# qhasm: x0 = mem256[ input_0 + 768 ]
# asm 1: vmovupd 768(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 768(<input_0=%rdi),>x0=%ymm6
vmovupd 768(%rdi), %ymm6

# qhasm: x1 = mem256[ input_0 + 800 ]
# asm 1: vmovupd 800(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 800(<input_0=%rdi),>x1=%ymm7
vmovupd 800(%rdi), %ymm7

# qhasm: x2 = mem256[ input_0 + 832 ]
# asm 1: vmovupd 832(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 832(<input_0=%rdi),>x2=%ymm8
vmovupd 832(%rdi), %ymm8

# qhasm: x3 = mem256[ input_0 + 864 ]
# asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9
vmovupd 864(%rdi), %ymm9

# qhasm: x4 = mem256[ input_0 + 896 ]
# asm 1: vmovupd 896(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 896(<input_0=%rdi),>x4=%ymm10
vmovupd 896(%rdi), %ymm10

# qhasm: x5 = mem256[ input_0 + 928 ]
# asm 1: vmovupd 928(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 928(<input_0=%rdi),>x5=%ymm11
vmovupd 928(%rdi), %ymm11

# qhasm: x6 = mem256[ input_0 + 960 ]
# asm 1: vmovupd 960(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 960(<input_0=%rdi),>x6=%ymm12
vmovupd 960(%rdi), %ymm12

# qhasm: x7 = mem256[ input_0 + 992 ]
# asm 1: vmovupd 992(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 992(<input_0=%rdi),>x7=%ymm13
vmovupd 992(%rdi), %ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
#
asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % 
ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & 
mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 768 ] = x0 # asm 1: vmovupd <x0=reg256#10,768(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,768(<input_0=%rdi) vmovupd % ymm9, 768( % rdi) # qhasm: mem256[ input_0 + 800 ] = x1 # asm 1: vmovupd <x1=reg256#14,800(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,800(<input_0=%rdi) vmovupd % ymm13, 800( % rdi) # qhasm: mem256[ input_0 + 832 ] = x2 # asm 1: vmovupd <x2=reg256#15,832(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,832(<input_0=%rdi) vmovupd % ymm14, 832( % rdi) # qhasm: mem256[ input_0 + 864 ] = x3 # asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi) vmovupd % ymm10, 864( % rdi) # qhasm: mem256[ input_0 + 896 ] = x4 # asm 1: vmovupd <x4=reg256#12,896(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,896(<input_0=%rdi) vmovupd % ymm11, 896( % rdi) # qhasm: mem256[ input_0 + 928 ] = x5 # asm 1: vmovupd <x5=reg256#9,928(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,928(<input_0=%rdi) vmovupd % ymm8, 928( % rdi) # qhasm: mem256[ input_0 + 960 ] = x6 # asm 1: vmovupd <x6=reg256#13,960(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,960(<input_0=%rdi) vmovupd % ymm12, 960( % rdi) # qhasm: mem256[ input_0 + 992 ] = x7 # asm 1: 
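# The same three exchange rounds repeat verbatim, eight ymm rows at a time,
# for each successive 256-byte block of the buffer (offsets 768, 1024, 1280,
# 1536 below): load eight rows, swap at strides 4, 2, 1, store.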
vmovupd <x7=reg256#7,992(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,992(<input_0=%rdi) vmovupd % ymm6, 992( % rdi) # qhasm: x0 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1024(<input_0=%rdi),>x0=%ymm6 vmovupd 1024( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1056 ] # asm 1: vmovupd 1056(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1056(<input_0=%rdi),>x1=%ymm7 vmovupd 1056( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1088 ] # asm 1: vmovupd 1088(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1088(<input_0=%rdi),>x2=%ymm8 vmovupd 1088( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1120 ] # asm 1: vmovupd 1120(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1120(<input_0=%rdi),>x3=%ymm9 vmovupd 1120( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1152 ] # asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10 vmovupd 1152( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1184 ] # asm 1: vmovupd 1184(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1184(<input_0=%rdi),>x5=%ymm11 vmovupd 1184( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1216 ] # asm 1: vmovupd 1216(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1216(<input_0=%rdi),>x6=%ymm12 vmovupd 1216( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1248 ] # asm 1: vmovupd 1248(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1248(<input_0=%rdi),>x7=%ymm13 vmovupd 1248( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 
4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand 
<x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand 
<x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor 
<v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1024 ] = x0 # asm 1: vmovupd <x0=reg256#10,1024(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1024(<input_0=%rdi) vmovupd % ymm9, 1024( % rdi) # qhasm: mem256[ input_0 + 1056 ] = x1 # asm 1: vmovupd <x1=reg256#14,1056(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1056(<input_0=%rdi) vmovupd % ymm13, 1056( % rdi) # qhasm: mem256[ input_0 + 1088 ] = x2 # asm 1: vmovupd <x2=reg256#15,1088(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,1088(<input_0=%rdi) vmovupd % ymm14, 1088( % rdi) # qhasm: mem256[ input_0 + 1120 ] = x3 # asm 1: vmovupd <x3=reg256#11,1120(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1120(<input_0=%rdi) vmovupd % ymm10, 1120( % rdi) # qhasm: mem256[ input_0 + 1152 ] = x4 # asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi) vmovupd % ymm11, 1152( % rdi) # qhasm: mem256[ input_0 + 1184 ] = x5 # asm 1: vmovupd <x5=reg256#9,1184(<input_0=int64#1) # asm 2: 
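# mask0..mask5 (ymm0..ymm5) are only read here and stay live across all
# blocks; their loads sit earlier in the file.  Presumably they hold the
# MASK*_* interleaving constants defined in consts.S, though which constant
# lands in which register is not visible in this stretch of code.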
vmovupd <x5=%ymm8,1184(<input_0=%rdi) vmovupd % ymm8, 1184( % rdi) # qhasm: mem256[ input_0 + 1216 ] = x6 # asm 1: vmovupd <x6=reg256#13,1216(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1216(<input_0=%rdi) vmovupd % ymm12, 1216( % rdi) # qhasm: mem256[ input_0 + 1248 ] = x7 # asm 1: vmovupd <x7=reg256#7,1248(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1248(<input_0=%rdi) vmovupd % ymm6, 1248( % rdi) # qhasm: x0 = mem256[ input_0 + 1280 ] # asm 1: vmovupd 1280(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1280(<input_0=%rdi),>x0=%ymm6 vmovupd 1280( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1312 ] # asm 1: vmovupd 1312(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1312(<input_0=%rdi),>x1=%ymm7 vmovupd 1312( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1344 ] # asm 1: vmovupd 1344(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1344(<input_0=%rdi),>x2=%ymm8 vmovupd 1344( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1376 ] # asm 1: vmovupd 1376(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1376(<input_0=%rdi),>x3=%ymm9 vmovupd 1376( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1408 ] # asm 1: vmovupd 1408(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1408(<input_0=%rdi),>x4=%ymm10 vmovupd 1408( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1440 ] # asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11 vmovupd 1440( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1472 ] # asm 1: vmovupd 1472(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1472(<input_0=%rdi),>x6=%ymm12 vmovupd 1472( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1504 ] # asm 1: vmovupd 1504(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1504(<input_0=%rdi),>x7=%ymm13 vmovupd 1504( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand 
<x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: 
vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % 
ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1280 ] = x0 # asm 1: vmovupd <x0=reg256#10,1280(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1280(<input_0=%rdi) vmovupd % ymm9, 1280( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x1 # asm 1: vmovupd <x1=reg256#14,1312(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1312(<input_0=%rdi) vmovupd % ymm13, 1312( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x2 # asm 1: vmovupd <x2=reg256#15,1344(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,1344(<input_0=%rdi) vmovupd % ymm14, 1344( % rdi) # qhasm: mem256[ input_0 + 1376 ] = x3 # asm 1: vmovupd <x3=reg256#11,1376(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1376(<input_0=%rdi) vmovupd % ymm10, 1376( % rdi) # qhasm: mem256[ input_0 + 1408 ] = x4 # asm 1: vmovupd 
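# Results are written back in place with unaligned 256-bit stores
# (vmovupd), eight rows per block at 32-byte steps, so no 32-byte alignment
# is required of the buffer behind rdi (input_0).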
<x4=reg256#12,1408(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1408(<input_0=%rdi) vmovupd % ymm11, 1408( % rdi) # qhasm: mem256[ input_0 + 1440 ] = x5 # asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi) vmovupd % ymm8, 1440( % rdi) # qhasm: mem256[ input_0 + 1472 ] = x6 # asm 1: vmovupd <x6=reg256#13,1472(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1472(<input_0=%rdi) vmovupd % ymm12, 1472( % rdi) # qhasm: mem256[ input_0 + 1504 ] = x7 # asm 1: vmovupd <x7=reg256#7,1504(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1504(<input_0=%rdi) vmovupd % ymm6, 1504( % rdi) # qhasm: x0 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1536(<input_0=%rdi),>x0=%ymm6 vmovupd 1536( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1568(<input_0=%rdi),>x1=%ymm7 vmovupd 1568( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1600(<input_0=%rdi),>x2=%ymm8 vmovupd 1600( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1632 ] # asm 1: vmovupd 1632(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1632(<input_0=%rdi),>x3=%ymm9 vmovupd 1632( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1664 ] # asm 1: vmovupd 1664(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1664(<input_0=%rdi),>x4=%ymm10 vmovupd 1664( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1696 ] # asm 1: vmovupd 1696(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1696(<input_0=%rdi),>x5=%ymm11 vmovupd 1696( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1728 ] # asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12 vmovupd 1728( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1760 ] # asm 1: vmovupd 1760(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1760(<input_0=%rdi),>x7=%ymm13 vmovupd 1760( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand 
% ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand 
<x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor 
<v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: 
vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1536 ] = x0 # asm 1: vmovupd <x0=reg256#10,1536(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1536(<input_0=%rdi) vmovupd % ymm9, 1536( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x1 # asm 1: vmovupd <x1=reg256#14,1568(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1568(<input_0=%rdi) vmovupd % ymm13, 1568( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x2 # asm 1: vmovupd <x2=reg256#15,1600(<input_0=int64#1) # asm 2: vmovupd 
<x2=%ymm14,1600(<input_0=%rdi) vmovupd % ymm14, 1600( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x3 # asm 1: vmovupd <x3=reg256#11,1632(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1632(<input_0=%rdi) vmovupd % ymm10, 1632( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x4 # asm 1: vmovupd <x4=reg256#12,1664(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1664(<input_0=%rdi) vmovupd % ymm11, 1664( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x5 # asm 1: vmovupd <x5=reg256#9,1696(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1696(<input_0=%rdi) vmovupd % ymm8, 1696( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6 # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi) vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1760 ] = x7 # asm 1: vmovupd <x7=reg256#7,1760(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1760(<input_0=%rdi) vmovupd % ymm6, 1760( % rdi) # qhasm: x0 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1792(<input_0=%rdi),>x0=%ymm6 vmovupd 1792( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1824(<input_0=%rdi),>x1=%ymm7 vmovupd 1824( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1856(<input_0=%rdi),>x2=%ymm8 vmovupd 1856( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1888(<input_0=%rdi),>x3=%ymm9 vmovupd 1888( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1920(<input_0=%rdi),>x4=%ymm10 vmovupd 1920( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1952(<input_0=%rdi),>x5=%ymm11 vmovupd 1952( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1984 ] # asm 1: vmovupd 1984(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1984(<input_0=%rdi),>x6=%ymm12 vmovupd 1984( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 2016 ] # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13 vmovupd 2016( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & 
mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#1 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm0 vpand % ymm13, % ymm0, % ymm0 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#1,<v10=reg256#1 # asm 2: vpsllq $4,<v10=%ymm0,<v10=%ymm0 vpsllq $4, % ymm0, % ymm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#1,>x3=reg256#1 # asm 2: vpor 
<v00=%ymm12,<v10=%ymm0,>x3=%ymm0 vpor % ymm12, % ymm0, % ymm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1 vpor % ymm9, % ymm1, % ymm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9 vpand % ymm14, % ymm2, % ymm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#13 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm12 vpand % ymm11, % ymm2, % ymm12 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#13,<v10=reg256#13 # asm 2: vpsllq $2,<v10=%ymm12,<v10=%ymm12 vpsllq $2, % ymm12, % ymm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#14 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm13 vpand % ymm14, % ymm3, % ymm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $2,<v01=%ymm13,<v01=%ymm13 vpsrlq $2, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9 vpor % ymm9, % ymm12, % ymm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11 vpor % ymm13, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12 vpand % ymm10, % ymm2, % ymm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#1,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x3=%ymm0,<mask2=%ymm2,>v10=%ymm13 vpand % ymm0, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12 vpor % ymm12, % ymm13, % ymm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0 vpor % ymm10, % ymm0, % ymm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10 vpand % ymm6, % ymm2, % ymm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm13 vpand % ymm8, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % 
ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10 vpor % ymm10, % ymm13, % ymm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#2,<mask2=reg256#3,>v10=reg256#3 # asm 2: vpand <x7=%ymm1,<mask2=%ymm2,>v10=%ymm2 vpand % ymm1, % ymm2, % ymm2 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#3,<v10=reg256#3 # asm 2: vpsllq $2,<v10=%ymm2,<v10=%ymm2 vpsllq $2, % ymm2, % ymm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#3,>x5=reg256#3 # asm 2: vpor <v00=%ymm8,<v10=%ymm2,>x5=%ymm2 vpor % ymm8, % ymm2, % ymm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1 vpor % ymm7, % ymm1, % ymm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4 # asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3 vpand % ymm9, % ymm4, % ymm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#13,<mask4=reg256#5,>v10=reg256#8 # asm 2: vpand <x1=%ymm12,<mask4=%ymm4,>v10=%ymm7 vpand % ymm12, % ymm4, % ymm7 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#8,<v10=reg256#8 # asm 2: vpsllq $1,<v10=%ymm7,<v10=%ymm7 vpsllq $1, % ymm7, % ymm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#10,<mask5=reg256#6,>v01=reg256#9 # asm 2: vpand <x0=%ymm9,<mask5=%ymm5,>v01=%ymm8 vpand % ymm9, % ymm5, % ymm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10 # asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9 vpand % ymm12, % ymm5, % ymm9 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $1,<v01=%ymm8,<v01=%ymm8 vpsrlq $1, % ymm8, % ymm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4 # asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3 vpor % ymm3, % ymm7, % ymm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8 # asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7 vpor % ymm8, % ymm9, % ymm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8 vpand % ymm11, % ymm4, % ymm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#1,<mask4=reg256#5,>v10=reg256#10 # asm 2: vpand <x3=%ymm0,<mask4=%ymm4,>v10=%ymm9 vpand % ymm0, % ymm4, % ymm9 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#10,<v10=reg256#10 # asm 2: vpsllq $1,<v10=%ymm9,<v10=%ymm9 vpsllq $1, % ymm9, % ymm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 
vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0 vpand % ymm0, % ymm5, % ymm0 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8 vpor % ymm8, % ymm9, % ymm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0 vpor % ymm11, % ymm0, % ymm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9 vpand % ymm10, % ymm4, % ymm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#3,<mask4=reg256#5,>v10=reg256#12 # asm 2: vpand <x5=%ymm2,<mask4=%ymm4,>v10=%ymm11 vpand % ymm2, % ymm4, % ymm11 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#12,<v10=reg256#12 # asm 2: vpsllq $1,<v10=%ymm11,<v10=%ymm11 vpsllq $1, % ymm11, % ymm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#11,<mask5=reg256#6,>v01=reg256#11 # asm 2: vpand <x4=%ymm10,<mask5=%ymm5,>v01=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3 # asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2 vpand % ymm2, % ymm5, % ymm2 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $1,<v01=%ymm10,<v01=%ymm10 vpsrlq $1, % ymm10, % ymm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9 vpor % ymm9, % ymm11, % ymm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3 # asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2 vpor % ymm10, % ymm2, % ymm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#11 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm10 vpand % ymm6, % ymm4, % ymm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#2,<mask4=reg256#5,>v10=reg256#5 # asm 2: vpand <x7=%ymm1,<mask4=%ymm4,>v10=%ymm4 vpand % ymm1, % ymm4, % ymm4 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#5,<v10=reg256#5 # asm 2: vpsllq $1,<v10=%ymm4,<v10=%ymm4 vpsllq $1, % ymm4, % ymm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1 vpand % ymm1, % ymm5, % ymm1 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#5,>x6=reg256#5 # asm 2: vpor <v00=%ymm10,<v10=%ymm4,>x6=%ymm4 vpor % ymm10, % ymm4, % ymm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1 vpor % ymm6, % ymm1, % ymm1 # qhasm: mem256[ input_0 + 1792 ] = x0 # asm 1: vmovupd <x0=reg256#4,1792(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm3,1792(<input_0=%rdi) vmovupd % ymm3, 1792( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x1 # asm 1: vmovupd <x1=reg256#8,1824(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm7,1824(<input_0=%rdi) vmovupd % ymm7, 1824( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x2 # asm 1: vmovupd 
<x2=reg256#9,1856(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm8,1856(<input_0=%rdi) vmovupd % ymm8, 1856( % rdi) # qhasm: mem256[ input_0 + 1888 ] = x3 # asm 1: vmovupd <x3=reg256#1,1888(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm0,1888(<input_0=%rdi) vmovupd % ymm0, 1888( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x4 # asm 1: vmovupd <x4=reg256#10,1920(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm9,1920(<input_0=%rdi) vmovupd % ymm9, 1920( % rdi) # qhasm: mem256[ input_0 + 1952 ] = x5 # asm 1: vmovupd <x5=reg256#3,1952(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm2,1952(<input_0=%rdi) vmovupd % ymm2, 1952( % rdi) # qhasm: mem256[ input_0 + 1984 ] = x6 # asm 1: vmovupd <x6=reg256#5,1984(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm4,1984(<input_0=%rdi) vmovupd % ymm4, 1984( % rdi) # qhasm: mem256[ input_0 + 2016 ] = x7 # asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi) vmovupd % ymm1, 2016( % rdi) # qhasm: return add % r11, % rsp ret
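Reader's note on the file above (this sketch is not part of the PQClean sources; the function and mask names are illustrative): the long runs of vpand / vpsllq / vpsrlq / vpor triples implement stages of an in-register bit-matrix transpose. At each stage, a register pair exchanges aligned bit groups of width s (s = 4, then 2, then 1), using a mask pair like the MASK*_0 / MASK*_1 constants defined in consts.S. A scalar C model of one 64-bit lane of a ymm register, assuming the masks for the shift-4 stage are the usual 0x0F0F.../0xF0F0... pair:

#include <stdint.h>

/* One exchange step: m0 selects the low s bits of each 2s-bit group,
   m1 = m0 << s selects the high s bits.  Bits of y selected by m0 move
   up into x; bits of x selected by m1 move down into y.  This mirrors
   the v00/v10/v01/v11 pattern in the qhasm comments above. */
static void exchange_step(uint64_t *x, uint64_t *y,
                          uint64_t m0, uint64_t m1, unsigned s)
{
    uint64_t v00 = *x & m0;         /* low groups that stay in x  */
    uint64_t v10 = (*y & m0) << s;  /* low groups of y move up    */
    uint64_t v01 = (*x & m1) >> s;  /* high groups of x move down */
    uint64_t v11 = *y & m1;         /* high groups that stay in y */
    *x = v00 | v10;
    *y = v01 | v11;
}

/* Example: the shift-4 stage, which pairs registers (i, i+4)
   exactly as x0..x7 are paired in the assembly above. */
void transpose_stage4(uint64_t v[8])
{
    const uint64_t M0 = 0x0F0F0F0F0F0F0F0FULL;
    const uint64_t M1 = 0xF0F0F0F0F0F0F0F0ULL;
    for (int i = 0; i < 4; i++)
        exchange_step(&v[i], &v[i + 4], M0, M1, 4);
}

The shift-2 stage uses the same step with masks 0x3333.../0xCCCC... on pairs (i, i+2) within each half, and the shift-1 stage uses 0x5555.../0xAAAA... on adjacent pairs (i, i+1), matching the mask2/mask3 and mask4/mask5 operands in the assembly.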
mktmansour/MKT-KSA-Geolocation-Security
76,827
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece460896f/avx2/vec256_maa_asm.S
#include "namespace.h" #define vec256_maa_asm CRYPTO_NAMESPACE(vec256_maa_asm) #define _vec256_maa_asm _CRYPTO_NAMESPACE(vec256_maa_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_maa_asm .p2align 5 .global _vec256_maa_asm .global vec256_maa_asm _vec256_maa_asm: vec256_maa_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: r12 = r12 ^ mem256[ input_0 + 384 ] # asm 1: vpxor 384(<input_0=int64#1),<r12=reg256#3,>r12=reg256#1 # asm 2: vpxor 384(<input_0=%rdi),<r12=%ymm2,>r12=%ymm0 vpxor 384( % rdi), % ymm2, % ymm0 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm0,384(<input_0=%rdi) vmovupd % ymm0, 384( % rdi) # qhasm: r12 = r12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#1,>r12=reg256#1 # asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm0,>r12=%ymm0 vpxor 384( % rsi), % 
ymm0, % ymm0 # qhasm: mem256[ input_1 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2) # asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi) vmovupd % ymm0, 384( % rsi) # qhasm: r11 = r11 ^ mem256[ input_0 + 352 ] # asm 1: vpxor 352(<input_0=int64#1),<r11=reg256#2,>r11=reg256#1 # asm 2: vpxor 352(<input_0=%rdi),<r11=%ymm1,>r11=%ymm0 vpxor 352( % rdi), % ymm1, % ymm0 # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm0,352(<input_0=%rdi) vmovupd % ymm0, 352( % rdi) # qhasm: r11 = r11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#1,>r11=reg256#1 # asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm0,>r11=%ymm0 vpxor 352( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2) # asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi) vmovupd % ymm0, 352( % rsi) # qhasm: r10 = r10 ^ mem256[ input_0 + 320 ] # asm 1: vpxor 320(<input_0=int64#1),<r10=reg256#14,>r10=reg256#1 # asm 2: vpxor 320(<input_0=%rdi),<r10=%ymm13,>r10=%ymm0 vpxor 320( % rdi), % ymm13, % ymm0 # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm0,320(<input_0=%rdi) vmovupd % ymm0, 320( % rdi) # qhasm: r10 = r10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#1,>r10=reg256#1 # asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm0,>r10=%ymm0 vpxor 320( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2) # asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi) vmovupd % ymm0, 320( % rsi) # qhasm: r9 = r9 ^ mem256[ input_0 + 288 ] # asm 1: vpxor 288(<input_0=int64#1),<r9=reg256#13,>r9=reg256#1 # asm 2: vpxor 288(<input_0=%rdi),<r9=%ymm12,>r9=%ymm0 vpxor 288( % rdi), % ymm12, % ymm0 # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm0,288(<input_0=%rdi) vmovupd % ymm0, 288( % rdi) # qhasm: r9 = r9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#1,>r9=reg256#1 # asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm0,>r9=%ymm0 vpxor 288( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2) # asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi) vmovupd % ymm0, 288( % rsi) # qhasm: r8 = r8 ^ mem256[ input_0 + 256 ] # asm 1: vpxor 256(<input_0=int64#1),<r8=reg256#12,>r8=reg256#1 # asm 2: vpxor 256(<input_0=%rdi),<r8=%ymm11,>r8=%ymm0 vpxor 256( % rdi), % ymm11, % ymm0 # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm0,256(<input_0=%rdi) vmovupd % ymm0, 256( % rdi) # qhasm: r8 = r8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#1,>r8=reg256#1 # asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm0,>r8=%ymm0 vpxor 256( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2) # asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi) vmovupd % ymm0, 256( % rsi) # qhasm: r7 = r7 ^ mem256[ input_0 + 224 ] # asm 1: vpxor 224(<input_0=int64#1),<r7=reg256#11,>r7=reg256#1 # asm 2: vpxor 224(<input_0=%rdi),<r7=%ymm10,>r7=%ymm0 vpxor 224( % rdi), % ymm10, % ymm0 # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm0,224(<input_0=%rdi) vmovupd % ymm0, 224( % rdi) # qhasm: r7 = r7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 
224(<input_1=int64#2),<r7=reg256#1,>r7=reg256#1 # asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm0,>r7=%ymm0 vpxor 224( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2) # asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi) vmovupd % ymm0, 224( % rsi) # qhasm: r6 = r6 ^ mem256[ input_0 + 192 ] # asm 1: vpxor 192(<input_0=int64#1),<r6=reg256#10,>r6=reg256#1 # asm 2: vpxor 192(<input_0=%rdi),<r6=%ymm9,>r6=%ymm0 vpxor 192( % rdi), % ymm9, % ymm0 # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm0,192(<input_0=%rdi) vmovupd % ymm0, 192( % rdi) # qhasm: r6 = r6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#1,>r6=reg256#1 # asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm0,>r6=%ymm0 vpxor 192( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2) # asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi) vmovupd % ymm0, 192( % rsi) # qhasm: r5 = r5 ^ mem256[ input_0 + 160 ] # asm 1: vpxor 160(<input_0=int64#1),<r5=reg256#9,>r5=reg256#1 # asm 2: vpxor 160(<input_0=%rdi),<r5=%ymm8,>r5=%ymm0 vpxor 160( % rdi), % ymm8, % ymm0 # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm0,160(<input_0=%rdi) vmovupd % ymm0, 160( % rdi) # qhasm: r5 = r5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#1,>r5=reg256#1 # asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm0,>r5=%ymm0 vpxor 160( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2) # asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi) vmovupd % ymm0, 160( % rsi) # qhasm: r4 = r4 ^ mem256[ input_0 + 128 ] # asm 1: vpxor 128(<input_0=int64#1),<r4=reg256#8,>r4=reg256#1 # asm 2: vpxor 128(<input_0=%rdi),<r4=%ymm7,>r4=%ymm0 vpxor 128( % rdi), % ymm7, % ymm0 # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm0,128(<input_0=%rdi) vmovupd % ymm0, 128( % rdi) # qhasm: r4 = r4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<r4=reg256#1,>r4=reg256#1 # asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm0,>r4=%ymm0 vpxor 128( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2) # asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi) vmovupd % ymm0, 128( % rsi) # qhasm: r3 = r3 ^ mem256[ input_0 + 96 ] # asm 1: vpxor 96(<input_0=int64#1),<r3=reg256#7,>r3=reg256#1 # asm 2: vpxor 96(<input_0=%rdi),<r3=%ymm6,>r3=%ymm0 vpxor 96( % rdi), % ymm6, % ymm0 # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm0,96(<input_0=%rdi) vmovupd % ymm0, 96( % rdi) # qhasm: r3 = r3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#1,>r3=reg256#1 # asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm0,>r3=%ymm0 vpxor 96( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2) # asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi) vmovupd % ymm0, 96( % rsi) # qhasm: r2 = r2 ^ mem256[ input_0 + 64 ] # asm 1: vpxor 64(<input_0=int64#1),<r2=reg256#6,>r2=reg256#1 # asm 2: vpxor 64(<input_0=%rdi),<r2=%ymm5,>r2=%ymm0 vpxor 64( % rdi), % ymm5, % ymm0 # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: r2 = r2 ^ mem256[ input_1 + 64 ] # asm 1: 
vpxor 64(<input_1=int64#2),<r2=reg256#1,>r2=reg256#1 # asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm0,>r2=%ymm0 vpxor 64( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2) # asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi) vmovupd % ymm0, 64( % rsi) # qhasm: r1 = r1 ^ mem256[ input_0 + 32 ] # asm 1: vpxor 32(<input_0=int64#1),<r1=reg256#5,>r1=reg256#1 # asm 2: vpxor 32(<input_0=%rdi),<r1=%ymm4,>r1=%ymm0 vpxor 32( % rdi), % ymm4, % ymm0 # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm0,32(<input_0=%rdi) vmovupd % ymm0, 32( % rdi) # qhasm: r1 = r1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#1,>r1=reg256#1 # asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm0,>r1=%ymm0 vpxor 32( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2) # asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi) vmovupd % ymm0, 32( % rsi) # qhasm: r0 = r0 ^ mem256[ input_0 + 0 ] # asm 1: vpxor 0(<input_0=int64#1),<r0=reg256#4,>r0=reg256#1 # asm 2: vpxor 0(<input_0=%rdi),<r0=%ymm3,>r0=%ymm0 vpxor 0( % rdi), % ymm3, % ymm0 # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm0,0(<input_0=%rdi) vmovupd % ymm0, 0( % rdi) # qhasm: r0 = r0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#1,>r0=reg256#1 # asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm0,>r0=%ymm0 vpxor 0( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2) # asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi) vmovupd % ymm0, 0( % rsi) # qhasm: return add % r11, % rsp ret
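The routine that ends here is qhasm-generated AVX2 code for a bitsliced multiply-accumulate over GF(2)[x]: each ymm register holds one bit-plane of 256 field elements, so a coefficient product is a single vpand and a coefficient sum is a single vpxor. The folding steps visible above (r20 into r11/r10/r8/r7, down to r13 into r4/r3/r1/r0) encode x^(13+j) = x^(4+j) + x^(3+j) + x^(1+j) + x^j, i.e. reduction modulo the pentanomial x^13 + x^4 + x^3 + x + 1, and the tail XORs each reduced coefficient into the 32-byte-strided array at input_0 and then into the one at input_1. Below is a minimal C sketch of the same multiply-reduce pattern, with a 64-bit word standing in for each 256-bit ymm lane; the names vec, GFBITS and vec_mul_acc and the single-destination accumulate are illustrative assumptions, not PQClean's exact interface.

/* Bitsliced GF(2^13) multiply-accumulate sketch (illustrative names,
 * not the library's API).  Word i of a vec[GFBITS] array carries bit i
 * of 64 independent field elements. */
#include <stdint.h>

#define GFBITS 13            /* bit-planes per field element */
typedef uint64_t vec;        /* 64-bit stand-in for a 256-bit ymm lane */

/* out ^= a*b in GF(2)[x] modulo x^13 + x^4 + x^3 + x + 1 */
static void vec_mul_acc(vec out[GFBITS],
                        const vec a[GFBITS], const vec b[GFBITS])
{
    vec prod[2 * GFBITS - 1] = {0};
    int i, j, k;

    /* schoolbook carry-less product: AND is a bit product, XOR a bit
       sum (the vpand/vpxor pairs in the assembly above) */
    for (i = 0; i < GFBITS; i++)
        for (j = 0; j < GFBITS; j++)
            prod[i + j] ^= a[i] & b[j];

    /* fold degrees 13..24 down using x^(13+j) = x^(4+j)+x^(3+j)+x^(1+j)+x^j,
       matching the r20 -> r11,r10,r8,r7 ... r13 -> r4,r3,r1,r0 steps */
    for (k = 2 * GFBITS - 2; k >= GFBITS; k--) {
        prod[k - GFBITS + 4] ^= prod[k];
        prod[k - GFBITS + 3] ^= prod[k];
        prod[k - GFBITS + 1] ^= prod[k];
        prod[k - GFBITS]     ^= prod[k];
    }

    for (i = 0; i < GFBITS; i++)
        out[i] ^= prod[i];
}

The assembly interleaves the reduction with the remaining schoolbook rows (hence moves like "r7 = r20", which seed a low coefficient from a folded high one before any a_i & b_j term of that degree has been accumulated) so that everything fits in the sixteen ymm registers; the two-phase sketch above is algebraically equivalent.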
mktmansour/MKT-KSA-Geolocation-Security
29,012
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864f/avx2/vec_mul_asm.S
#include "namespace.h" #define vec_mul_asm CRYPTO_NAMESPACE(vec_mul_asm) #define _vec_mul_asm _CRYPTO_NAMESPACE(vec_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 s0 # qhasm: reg256 s1 # qhasm: reg256 s2 # qhasm: reg256 s3 # qhasm: reg256 s4 # qhasm: reg256 s5 # qhasm: reg256 t0 # qhasm: reg256 t1 # qhasm: reg256 t2 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r # qhasm: int64 h0 # qhasm: int64 h1 # qhasm: int64 h2 # qhasm: int64 h3 # qhasm: int64 h4 # qhasm: int64 h5 # qhasm: int64 h6 # qhasm: int64 h7 # qhasm: int64 h8 # qhasm: int64 h9 # qhasm: int64 h10 # qhasm: int64 h11 # qhasm: int64 h12 # qhasm: int64 h13 # qhasm: int64 h14 # qhasm: int64 h15 # qhasm: int64 h16 # qhasm: int64 h17 # qhasm: int64 h18 # qhasm: int64 h19 # qhasm: int64 h20 # qhasm: int64 h21 # qhasm: int64 h22 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: stack64 r11_stack # qhasm: stack64 r12_stack # qhasm: stack64 r13_stack # qhasm: stack64 r14_stack # qhasm: stack64 r15_stack # qhasm: stack64 rbx_stack # qhasm: stack64 rbp_stack # qhasm: enter vec_mul_asm .p2align 5 .global _vec_mul_asm .global vec_mul_asm _vec_mul_asm: vec_mul_asm: mov % rsp, % r11 and $31, % r11 add $672, % r11 sub % r11, % rsp # qhasm: r11_stack = caller_r11 # asm 1: movq <caller_r11=int64#9,>r11_stack=stack64#1 # asm 2: movq <caller_r11=%r11,>r11_stack=608(%rsp) movq % r11, 608( % rsp) # qhasm: r12_stack = caller_r12 # asm 1: movq <caller_r12=int64#10,>r12_stack=stack64#2 # asm 2: movq <caller_r12=%r12,>r12_stack=616(%rsp) movq % r12, 616( % rsp) # qhasm: r13_stack = caller_r13 # asm 1: movq <caller_r13=int64#11,>r13_stack=stack64#3 # asm 2: movq <caller_r13=%r13,>r13_stack=624(%rsp) movq % r13, 624( % rsp) # qhasm: r14_stack = caller_r14 # asm 1: movq <caller_r14=int64#12,>r14_stack=stack64#4 # asm 2: movq <caller_r14=%r14,>r14_stack=632(%rsp) movq % r14, 632( % rsp) # qhasm: r15_stack = caller_r15 # asm 1: movq <caller_r15=int64#13,>r15_stack=stack64#5 # asm 2: movq <caller_r15=%r15,>r15_stack=640(%rsp) movq % r15, 640( % rsp) # qhasm: rbx_stack = caller_rbx # asm 1: movq <caller_rbx=int64#14,>rbx_stack=stack64#6 # asm 2: movq <caller_rbx=%rbx,>rbx_stack=648(%rsp) movq % rbx, 648( % rsp) # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>ptr=%rcx leaq 0( % rsp), % rcx # qhasm: s0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>s0=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>s0=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: s1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 
32(<input_1=int64#2),>s1=reg256#2 # asm 2: vmovupd 32(<input_1=%rsi),>s1=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: s2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>s2=reg256#3 # asm 2: vmovupd 64(<input_1=%rsi),>s2=%ymm2 vmovupd 64( % rsi), % ymm2 # qhasm: t0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>t0=reg256#4 # asm 2: vmovupd 0(<input_2=%rdx),>t0=%ymm3 vmovupd 0( % rdx), % ymm3 # qhasm: t1 = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>t1=reg256#5 # asm 2: vmovupd 32(<input_2=%rdx),>t1=%ymm4 vmovupd 32( % rdx), % ymm4 # qhasm: t2 = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>t2=reg256#6 # asm 2: vmovupd 64(<input_2=%rdx),>t2=%ymm5 vmovupd 64( % rdx), % ymm5 # qhasm: a5[0,1,2,3] = s2[2,2,3,3] # asm 1: vpermq $0xfa,<s2=reg256#3,>a5=reg256#7 # asm 2: vpermq $0xfa,<s2=%ymm2,>a5=%ymm6 vpermq $0xfa, % ymm2, % ymm6 # qhasm: b5[0,1,2,3] = t2[2,3,2,3] # asm 1: vpermq $0xee,<t2=reg256#6,>b5=reg256#8 # asm 2: vpermq $0xee,<t2=%ymm5,>b5=%ymm7 vpermq $0xee, % ymm5, % ymm7 # qhasm: r10 = a5 & b5 # asm 1: vpand <a5=reg256#7,<b5=reg256#8,>r10=reg256#9 # asm 2: vpand <a5=%ymm6,<b5=%ymm7,>r10=%ymm8 vpand % ymm6, % ymm7, % ymm8 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#9,320(<ptr=int64#4) # asm 2: vmovupd <r10=%ymm8,320(<ptr=%rcx) vmovupd % ymm8, 320( % rcx) # qhasm: b4[0,1,2,3] = t2[0,1,0,1] # asm 1: vpermq $0x44,<t2=reg256#6,>b4=reg256#6 # asm 2: vpermq $0x44,<t2=%ymm5,>b4=%ymm5 vpermq $0x44, % ymm5, % ymm5 # qhasm: r9 = a5 & b4 # asm 1: vpand <a5=reg256#7,<b4=reg256#6,>r9=reg256#9 # asm 2: vpand <a5=%ymm6,<b4=%ymm5,>r9=%ymm8 vpand % ymm6, % ymm5, % ymm8 # qhasm: b3[0,1,2,3] = t1[2,3,2,3] # asm 1: vpermq $0xee,<t1=reg256#5,>b3=reg256#10 # asm 2: vpermq $0xee,<t1=%ymm4,>b3=%ymm9 vpermq $0xee, % ymm4, % ymm9 # qhasm: r8 = a5 & b3 # asm 1: vpand <a5=reg256#7,<b3=reg256#10,>r8=reg256#11 # asm 2: vpand <a5=%ymm6,<b3=%ymm9,>r8=%ymm10 vpand % ymm6, % ymm9, % ymm10 # qhasm: b2[0,1,2,3] = t1[0,1,0,1] # asm 1: vpermq $0x44,<t1=reg256#5,>b2=reg256#5 # asm 2: vpermq $0x44,<t1=%ymm4,>b2=%ymm4 vpermq $0x44, % ymm4, % ymm4 # qhasm: r7 = a5 & b2 # asm 1: vpand <a5=reg256#7,<b2=reg256#5,>r7=reg256#12 # asm 2: vpand <a5=%ymm6,<b2=%ymm4,>r7=%ymm11 vpand % ymm6, % ymm4, % ymm11 # qhasm: b1[0,1,2,3] = t0[2,3,2,3] # asm 1: vpermq $0xee,<t0=reg256#4,>b1=reg256#13 # asm 2: vpermq $0xee,<t0=%ymm3,>b1=%ymm12 vpermq $0xee, % ymm3, % ymm12 # qhasm: r6 = a5 & b1 # asm 1: vpand <a5=reg256#7,<b1=reg256#13,>r6=reg256#14 # asm 2: vpand <a5=%ymm6,<b1=%ymm12,>r6=%ymm13 vpand % ymm6, % ymm12, % ymm13 # qhasm: b0[0,1,2,3] = t0[0,1,0,1] # asm 1: vpermq $0x44,<t0=reg256#4,>b0=reg256#4 # asm 2: vpermq $0x44,<t0=%ymm3,>b0=%ymm3 vpermq $0x44, % ymm3, % ymm3 # qhasm: r5 = a5 & b0 # asm 1: vpand <a5=reg256#7,<b0=reg256#4,>r5=reg256#7 # asm 2: vpand <a5=%ymm6,<b0=%ymm3,>r5=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: a4[0,1,2,3] = s2[0,0,1,1] # asm 1: vpermq $0x50,<s2=reg256#3,>a4=reg256#3 # asm 2: vpermq $0x50,<s2=%ymm2,>a4=%ymm2 vpermq $0x50, % ymm2, % ymm2 # qhasm: r = a4 & b5 # asm 1: vpand <a4=reg256#3,<b5=reg256#8,>r=reg256#15 # asm 2: vpand <a4=%ymm2,<b5=%ymm7,>r=%ymm14 vpand % ymm2, % ymm7, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#9,<r9=reg256#9 # asm 2: vpxor <r=%ymm14,<r9=%ymm8,<r9=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#9,288(<ptr=int64#4) # asm 2: vmovupd <r9=%ymm8,288(<ptr=%rcx) vmovupd % ymm8, 288( % rcx) # qhasm: r = a4 & b4 # asm 1: vpand 
<a4=reg256#3,<b4=reg256#6,>r=reg256#9 # asm 2: vpand <a4=%ymm2,<b4=%ymm5,>r=%ymm8 vpand % ymm2, % ymm5, % ymm8 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#9,<r8=reg256#11,<r8=reg256#11 # asm 2: vpxor <r=%ymm8,<r8=%ymm10,<r8=%ymm10 vpxor % ymm8, % ymm10, % ymm10 # qhasm: r = a4 & b3 # asm 1: vpand <a4=reg256#3,<b3=reg256#10,>r=reg256#9 # asm 2: vpand <a4=%ymm2,<b3=%ymm9,>r=%ymm8 vpand % ymm2, % ymm9, % ymm8 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#9,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm8,<r7=%ymm11,<r7=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r = a4 & b2 # asm 1: vpand <a4=reg256#3,<b2=reg256#5,>r=reg256#9 # asm 2: vpand <a4=%ymm2,<b2=%ymm4,>r=%ymm8 vpand % ymm2, % ymm4, % ymm8 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#9,<r6=reg256#14,<r6=reg256#14 # asm 2: vpxor <r=%ymm8,<r6=%ymm13,<r6=%ymm13 vpxor % ymm8, % ymm13, % ymm13 # qhasm: r = a4 & b1 # asm 1: vpand <a4=reg256#3,<b1=reg256#13,>r=reg256#9 # asm 2: vpand <a4=%ymm2,<b1=%ymm12,>r=%ymm8 vpand % ymm2, % ymm12, % ymm8 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#9,<r5=reg256#7,<r5=reg256#7 # asm 2: vpxor <r=%ymm8,<r5=%ymm6,<r5=%ymm6 vpxor % ymm8, % ymm6, % ymm6 # qhasm: r4 = a4 & b0 # asm 1: vpand <a4=reg256#3,<b0=reg256#4,>r4=reg256#3 # asm 2: vpand <a4=%ymm2,<b0=%ymm3,>r4=%ymm2 vpand % ymm2, % ymm3, % ymm2 # qhasm: a3[0,1,2,3] = s1[2,2,3,3] # asm 1: vpermq $0xfa,<s1=reg256#2,>a3=reg256#9 # asm 2: vpermq $0xfa,<s1=%ymm1,>a3=%ymm8 vpermq $0xfa, % ymm1, % ymm8 # qhasm: r = a3 & b5 # asm 1: vpand <a3=reg256#9,<b5=reg256#8,>r=reg256#15 # asm 2: vpand <a3=%ymm8,<b5=%ymm7,>r=%ymm14 vpand % ymm8, % ymm7, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#11,<r8=reg256#11 # asm 2: vpxor <r=%ymm14,<r8=%ymm10,<r8=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#11,256(<ptr=int64#4) # asm 2: vmovupd <r8=%ymm10,256(<ptr=%rcx) vmovupd % ymm10, 256( % rcx) # qhasm: r = a3 & b4 # asm 1: vpand <a3=reg256#9,<b4=reg256#6,>r=reg256#11 # asm 2: vpand <a3=%ymm8,<b4=%ymm5,>r=%ymm10 vpand % ymm8, % ymm5, % ymm10 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#11,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm10,<r7=%ymm11,<r7=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r = a3 & b3 # asm 1: vpand <a3=reg256#9,<b3=reg256#10,>r=reg256#11 # asm 2: vpand <a3=%ymm8,<b3=%ymm9,>r=%ymm10 vpand % ymm8, % ymm9, % ymm10 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#11,<r6=reg256#14,<r6=reg256#14 # asm 2: vpxor <r=%ymm10,<r6=%ymm13,<r6=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r = a3 & b2 # asm 1: vpand <a3=reg256#9,<b2=reg256#5,>r=reg256#11 # asm 2: vpand <a3=%ymm8,<b2=%ymm4,>r=%ymm10 vpand % ymm8, % ymm4, % ymm10 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#11,<r5=reg256#7,<r5=reg256#7 # asm 2: vpxor <r=%ymm10,<r5=%ymm6,<r5=%ymm6 vpxor % ymm10, % ymm6, % ymm6 # qhasm: r = a3 & b1 # asm 1: vpand <a3=reg256#9,<b1=reg256#13,>r=reg256#11 # asm 2: vpand <a3=%ymm8,<b1=%ymm12,>r=%ymm10 vpand % ymm8, % ymm12, % ymm10 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#11,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm10,<r4=%ymm2,<r4=%ymm2 vpxor % ymm10, % ymm2, % ymm2 # qhasm: r3 = a3 & b0 # asm 1: vpand <a3=reg256#9,<b0=reg256#4,>r3=reg256#9 # asm 2: vpand <a3=%ymm8,<b0=%ymm3,>r3=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: a2[0,1,2,3] = s1[0,0,1,1] # asm 1: vpermq $0x50,<s1=reg256#2,>a2=reg256#2 # asm 2: vpermq $0x50,<s1=%ymm1,>a2=%ymm1 vpermq $0x50, % ymm1, % ymm1 # qhasm: r = a2 & b5 # asm 1: vpand <a2=reg256#2,<b5=reg256#8,>r=reg256#11 # asm 2: vpand <a2=%ymm1,<b5=%ymm7,>r=%ymm10 vpand % ymm1, % ymm7, % 
ymm10 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#11,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm10,<r7=%ymm11,<r7=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#12,224(<ptr=int64#4) # asm 2: vmovupd <r7=%ymm11,224(<ptr=%rcx) vmovupd % ymm11, 224( % rcx) # qhasm: r = a2 & b4 # asm 1: vpand <a2=reg256#2,<b4=reg256#6,>r=reg256#11 # asm 2: vpand <a2=%ymm1,<b4=%ymm5,>r=%ymm10 vpand % ymm1, % ymm5, % ymm10 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#11,<r6=reg256#14,<r6=reg256#14 # asm 2: vpxor <r=%ymm10,<r6=%ymm13,<r6=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r = a2 & b3 # asm 1: vpand <a2=reg256#2,<b3=reg256#10,>r=reg256#11 # asm 2: vpand <a2=%ymm1,<b3=%ymm9,>r=%ymm10 vpand % ymm1, % ymm9, % ymm10 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#11,<r5=reg256#7,<r5=reg256#7 # asm 2: vpxor <r=%ymm10,<r5=%ymm6,<r5=%ymm6 vpxor % ymm10, % ymm6, % ymm6 # qhasm: r = a2 & b2 # asm 1: vpand <a2=reg256#2,<b2=reg256#5,>r=reg256#11 # asm 2: vpand <a2=%ymm1,<b2=%ymm4,>r=%ymm10 vpand % ymm1, % ymm4, % ymm10 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#11,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm10,<r4=%ymm2,<r4=%ymm2 vpxor % ymm10, % ymm2, % ymm2 # qhasm: r = a2 & b1 # asm 1: vpand <a2=reg256#2,<b1=reg256#13,>r=reg256#11 # asm 2: vpand <a2=%ymm1,<b1=%ymm12,>r=%ymm10 vpand % ymm1, % ymm12, % ymm10 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#11,<r3=reg256#9,<r3=reg256#9 # asm 2: vpxor <r=%ymm10,<r3=%ymm8,<r3=%ymm8 vpxor % ymm10, % ymm8, % ymm8 # qhasm: r2 = a2 & b0 # asm 1: vpand <a2=reg256#2,<b0=reg256#4,>r2=reg256#2 # asm 2: vpand <a2=%ymm1,<b0=%ymm3,>r2=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: a1[0,1,2,3] = s0[2,2,3,3] # asm 1: vpermq $0xfa,<s0=reg256#1,>a1=reg256#11 # asm 2: vpermq $0xfa,<s0=%ymm0,>a1=%ymm10 vpermq $0xfa, % ymm0, % ymm10 # qhasm: r = a1 & b5 # asm 1: vpand <a1=reg256#11,<b5=reg256#8,>r=reg256#12 # asm 2: vpand <a1=%ymm10,<b5=%ymm7,>r=%ymm11 vpand % ymm10, % ymm7, % ymm11 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#12,<r6=reg256#14,<r6=reg256#14 # asm 2: vpxor <r=%ymm11,<r6=%ymm13,<r6=%ymm13 vpxor % ymm11, % ymm13, % ymm13 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#14,192(<ptr=int64#4) # asm 2: vmovupd <r6=%ymm13,192(<ptr=%rcx) vmovupd % ymm13, 192( % rcx) # qhasm: r = a1 & b4 # asm 1: vpand <a1=reg256#11,<b4=reg256#6,>r=reg256#12 # asm 2: vpand <a1=%ymm10,<b4=%ymm5,>r=%ymm11 vpand % ymm10, % ymm5, % ymm11 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#12,<r5=reg256#7,<r5=reg256#7 # asm 2: vpxor <r=%ymm11,<r5=%ymm6,<r5=%ymm6 vpxor % ymm11, % ymm6, % ymm6 # qhasm: r = a1 & b3 # asm 1: vpand <a1=reg256#11,<b3=reg256#10,>r=reg256#12 # asm 2: vpand <a1=%ymm10,<b3=%ymm9,>r=%ymm11 vpand % ymm10, % ymm9, % ymm11 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#12,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm11,<r4=%ymm2,<r4=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r = a1 & b2 # asm 1: vpand <a1=reg256#11,<b2=reg256#5,>r=reg256#12 # asm 2: vpand <a1=%ymm10,<b2=%ymm4,>r=%ymm11 vpand % ymm10, % ymm4, % ymm11 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#12,<r3=reg256#9,<r3=reg256#9 # asm 2: vpxor <r=%ymm11,<r3=%ymm8,<r3=%ymm8 vpxor % ymm11, % ymm8, % ymm8 # qhasm: r = a1 & b1 # asm 1: vpand <a1=reg256#11,<b1=reg256#13,>r=reg256#12 # asm 2: vpand <a1=%ymm10,<b1=%ymm12,>r=%ymm11 vpand % ymm10, % ymm12, % ymm11 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#12,<r2=reg256#2,<r2=reg256#2 # asm 2: vpxor <r=%ymm11,<r2=%ymm1,<r2=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r1 = a1 & b0 # asm 1: vpand <a1=reg256#11,<b0=reg256#4,>r1=reg256#11 # 
asm 2: vpand <a1=%ymm10,<b0=%ymm3,>r1=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: a0[0,1,2,3] = s0[0,0,1,1] # asm 1: vpermq $0x50,<s0=reg256#1,>a0=reg256#1 # asm 2: vpermq $0x50,<s0=%ymm0,>a0=%ymm0 vpermq $0x50, % ymm0, % ymm0 # qhasm: r = a0 & b5 # asm 1: vpand <a0=reg256#1,<b5=reg256#8,>r=reg256#8 # asm 2: vpand <a0=%ymm0,<b5=%ymm7,>r=%ymm7 vpand % ymm0, % ymm7, % ymm7 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#8,<r5=reg256#7,<r5=reg256#7 # asm 2: vpxor <r=%ymm7,<r5=%ymm6,<r5=%ymm6 vpxor % ymm7, % ymm6, % ymm6 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#7,160(<ptr=int64#4) # asm 2: vmovupd <r5=%ymm6,160(<ptr=%rcx) vmovupd % ymm6, 160( % rcx) # qhasm: r = a0 & b4 # asm 1: vpand <a0=reg256#1,<b4=reg256#6,>r=reg256#6 # asm 2: vpand <a0=%ymm0,<b4=%ymm5,>r=%ymm5 vpand % ymm0, % ymm5, % ymm5 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#6,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm5,<r4=%ymm2,<r4=%ymm2 vpxor % ymm5, % ymm2, % ymm2 # qhasm: r = a0 & b3 # asm 1: vpand <a0=reg256#1,<b3=reg256#10,>r=reg256#6 # asm 2: vpand <a0=%ymm0,<b3=%ymm9,>r=%ymm5 vpand % ymm0, % ymm9, % ymm5 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#6,<r3=reg256#9,<r3=reg256#9 # asm 2: vpxor <r=%ymm5,<r3=%ymm8,<r3=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r = a0 & b2 # asm 1: vpand <a0=reg256#1,<b2=reg256#5,>r=reg256#5 # asm 2: vpand <a0=%ymm0,<b2=%ymm4,>r=%ymm4 vpand % ymm0, % ymm4, % ymm4 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#5,<r2=reg256#2,<r2=reg256#2 # asm 2: vpxor <r=%ymm4,<r2=%ymm1,<r2=%ymm1 vpxor % ymm4, % ymm1, % ymm1 # qhasm: r = a0 & b1 # asm 1: vpand <a0=reg256#1,<b1=reg256#13,>r=reg256#5 # asm 2: vpand <a0=%ymm0,<b1=%ymm12,>r=%ymm4 vpand % ymm0, % ymm12, % ymm4 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#5,<r1=reg256#11,<r1=reg256#11 # asm 2: vpxor <r=%ymm4,<r1=%ymm10,<r1=%ymm10 vpxor % ymm4, % ymm10, % ymm10 # qhasm: r0 = a0 & b0 # asm 1: vpand <a0=reg256#1,<b0=reg256#4,>r0=reg256#1 # asm 2: vpand <a0=%ymm0,<b0=%ymm3,>r0=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#3,128(<ptr=int64#4) # asm 2: vmovupd <r4=%ymm2,128(<ptr=%rcx) vmovupd % ymm2, 128( % rcx) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#9,96(<ptr=int64#4) # asm 2: vmovupd <r3=%ymm8,96(<ptr=%rcx) vmovupd % ymm8, 96( % rcx) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#2,64(<ptr=int64#4) # asm 2: vmovupd <r2=%ymm1,64(<ptr=%rcx) vmovupd % ymm1, 64( % rcx) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#11,32(<ptr=int64#4) # asm 2: vmovupd <r1=%ymm10,32(<ptr=%rcx) vmovupd % ymm10, 32( % rcx) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#4) # asm 2: vmovupd <r0=%ymm0,0(<ptr=%rcx) vmovupd % ymm0, 0( % rcx) # qhasm: h22 = mem64[ ptr + 344 ] # asm 1: movq 344(<ptr=int64#4),>h22=int64#2 # asm 2: movq 344(<ptr=%rcx),>h22=%rsi movq 344( % rcx), % rsi # qhasm: h13 = h22 # asm 1: mov <h22=int64#2,>h13=int64#3 # asm 2: mov <h22=%rsi,>h13=%rdx mov % rsi, % rdx # qhasm: h10 = h22 # asm 1: mov <h22=int64#2,>h10=int64#2 # asm 2: mov <h22=%rsi,>h10=%rsi mov % rsi, % rsi # qhasm: h21 = mem64[ ptr + 336 ] # asm 1: movq 336(<ptr=int64#4),>h21=int64#5 # asm 2: movq 336(<ptr=%rcx),>h21=%r8 movq 336( % rcx), % r8 # qhasm: h21 ^= *(uint64 *) ( ptr + 328 ) # asm 1: xorq 328(<ptr=int64#4),<h21=int64#5 # asm 2: xorq 328(<ptr=%rcx),<h21=%r8 xorq 328( % rcx), % r8 # qhasm: h12 = h21 # asm 1: mov <h21=int64#5,>h12=int64#6 # asm 2: mov <h21=%r8,>h12=%r9 mov % r8, % r9 # qhasm: h9 = h21 # asm 1: mov <h21=int64#5,>h9=int64#5 # asm 2: mov 
<h21=%r8,>h9=%r8 mov % r8, % r8 # qhasm: h20 = mem64[ ptr + 312 ] # asm 1: movq 312(<ptr=int64#4),>h20=int64#7 # asm 2: movq 312(<ptr=%rcx),>h20=%rax movq 312( % rcx), % rax # qhasm: h20 ^= *(uint64 *) ( ptr + 320 ) # asm 1: xorq 320(<ptr=int64#4),<h20=int64#7 # asm 2: xorq 320(<ptr=%rcx),<h20=%rax xorq 320( % rcx), % rax # qhasm: h11 = h20 # asm 1: mov <h20=int64#7,>h11=int64#8 # asm 2: mov <h20=%rax,>h11=%r10 mov % rax, % r10 # qhasm: h8 = h20 # asm 1: mov <h20=int64#7,>h8=int64#7 # asm 2: mov <h20=%rax,>h8=%rax mov % rax, % rax # qhasm: h19 = mem64[ ptr + 304 ] # asm 1: movq 304(<ptr=int64#4),>h19=int64#9 # asm 2: movq 304(<ptr=%rcx),>h19=%r11 movq 304( % rcx), % r11 # qhasm: h19 ^= *(uint64 *) ( ptr + 296 ) # asm 1: xorq 296(<ptr=int64#4),<h19=int64#9 # asm 2: xorq 296(<ptr=%rcx),<h19=%r11 xorq 296( % rcx), % r11 # qhasm: h10 ^= h19 # asm 1: xor <h19=int64#9,<h10=int64#2 # asm 2: xor <h19=%r11,<h10=%rsi xor % r11, % rsi # qhasm: h7 = h19 # asm 1: mov <h19=int64#9,>h7=int64#9 # asm 2: mov <h19=%r11,>h7=%r11 mov % r11, % r11 # qhasm: h18 = mem64[ ptr + 280 ] # asm 1: movq 280(<ptr=int64#4),>h18=int64#10 # asm 2: movq 280(<ptr=%rcx),>h18=%r12 movq 280( % rcx), % r12 # qhasm: h18 ^= *(uint64 *) ( ptr + 288 ) # asm 1: xorq 288(<ptr=int64#4),<h18=int64#10 # asm 2: xorq 288(<ptr=%rcx),<h18=%r12 xorq 288( % rcx), % r12 # qhasm: h9 ^= h18 # asm 1: xor <h18=int64#10,<h9=int64#5 # asm 2: xor <h18=%r12,<h9=%r8 xor % r12, % r8 # qhasm: h6 = h18 # asm 1: mov <h18=int64#10,>h6=int64#10 # asm 2: mov <h18=%r12,>h6=%r12 mov % r12, % r12 # qhasm: h17 = mem64[ ptr + 272 ] # asm 1: movq 272(<ptr=int64#4),>h17=int64#11 # asm 2: movq 272(<ptr=%rcx),>h17=%r13 movq 272( % rcx), % r13 # qhasm: h17 ^= *(uint64 *) ( ptr + 264 ) # asm 1: xorq 264(<ptr=int64#4),<h17=int64#11 # asm 2: xorq 264(<ptr=%rcx),<h17=%r13 xorq 264( % rcx), % r13 # qhasm: h8 ^= h17 # asm 1: xor <h17=int64#11,<h8=int64#7 # asm 2: xor <h17=%r13,<h8=%rax xor % r13, % rax # qhasm: h5 = h17 # asm 1: mov <h17=int64#11,>h5=int64#11 # asm 2: mov <h17=%r13,>h5=%r13 mov % r13, % r13 # qhasm: h16 = mem64[ ptr + 248 ] # asm 1: movq 248(<ptr=int64#4),>h16=int64#12 # asm 2: movq 248(<ptr=%rcx),>h16=%r14 movq 248( % rcx), % r14 # qhasm: h16 ^= *(uint64 *) ( ptr + 256 ) # asm 1: xorq 256(<ptr=int64#4),<h16=int64#12 # asm 2: xorq 256(<ptr=%rcx),<h16=%r14 xorq 256( % rcx), % r14 # qhasm: h7 ^= h16 # asm 1: xor <h16=int64#12,<h7=int64#9 # asm 2: xor <h16=%r14,<h7=%r11 xor % r14, % r11 # qhasm: h4 = h16 # asm 1: mov <h16=int64#12,>h4=int64#12 # asm 2: mov <h16=%r14,>h4=%r14 mov % r14, % r14 # qhasm: h15 = mem64[ ptr + 240 ] # asm 1: movq 240(<ptr=int64#4),>h15=int64#13 # asm 2: movq 240(<ptr=%rcx),>h15=%r15 movq 240( % rcx), % r15 # qhasm: h15 ^= *(uint64 *) ( ptr + 232 ) # asm 1: xorq 232(<ptr=int64#4),<h15=int64#13 # asm 2: xorq 232(<ptr=%rcx),<h15=%r15 xorq 232( % rcx), % r15 # qhasm: h6 ^= h15 # asm 1: xor <h15=int64#13,<h6=int64#10 # asm 2: xor <h15=%r15,<h6=%r12 xor % r15, % r12 # qhasm: h3 = h15 # asm 1: mov <h15=int64#13,>h3=int64#13 # asm 2: mov <h15=%r15,>h3=%r15 mov % r15, % r15 # qhasm: h14 = mem64[ ptr + 216 ] # asm 1: movq 216(<ptr=int64#4),>h14=int64#14 # asm 2: movq 216(<ptr=%rcx),>h14=%rbx movq 216( % rcx), % rbx # qhasm: h14 ^= *(uint64 *) ( ptr + 224 ) # asm 1: xorq 224(<ptr=int64#4),<h14=int64#14 # asm 2: xorq 224(<ptr=%rcx),<h14=%rbx xorq 224( % rcx), % rbx # qhasm: h5 ^= h14 # asm 1: xor <h14=int64#14,<h5=int64#11 # asm 2: xor <h14=%rbx,<h5=%r13 xor % rbx, % r13 # qhasm: h2 = h14 # asm 1: mov <h14=int64#14,>h2=int64#14 # asm 2: mov 
<h14=%rbx,>h2=%rbx mov % rbx, % rbx # qhasm: h13 ^= *(uint64 *) ( ptr + 208 ) # asm 1: xorq 208(<ptr=int64#4),<h13=int64#3 # asm 2: xorq 208(<ptr=%rcx),<h13=%rdx xorq 208( % rcx), % rdx # qhasm: h13 ^= *(uint64 *) ( ptr + 200 ) # asm 1: xorq 200(<ptr=int64#4),<h13=int64#3 # asm 2: xorq 200(<ptr=%rcx),<h13=%rdx xorq 200( % rcx), % rdx # qhasm: h4 ^= h13 # asm 1: xor <h13=int64#3,<h4=int64#12 # asm 2: xor <h13=%rdx,<h4=%r14 xor % rdx, % r14 # qhasm: h1 = h13 # asm 1: mov <h13=int64#3,>h1=int64#3 # asm 2: mov <h13=%rdx,>h1=%rdx mov % rdx, % rdx # qhasm: h12 ^= *(uint64 *) ( ptr + 184 ) # asm 1: xorq 184(<ptr=int64#4),<h12=int64#6 # asm 2: xorq 184(<ptr=%rcx),<h12=%r9 xorq 184( % rcx), % r9 # qhasm: h12 ^= *(uint64 *) ( ptr + 192 ) # asm 1: xorq 192(<ptr=int64#4),<h12=int64#6 # asm 2: xorq 192(<ptr=%rcx),<h12=%r9 xorq 192( % rcx), % r9 # qhasm: h3 ^= h12 # asm 1: xor <h12=int64#6,<h3=int64#13 # asm 2: xor <h12=%r9,<h3=%r15 xor % r9, % r15 # qhasm: h0 = h12 # asm 1: mov <h12=int64#6,>h0=int64#6 # asm 2: mov <h12=%r9,>h0=%r9 mov % r9, % r9 # qhasm: h11 ^= *(uint64 *) ( ptr + 176 ) # asm 1: xorq 176(<ptr=int64#4),<h11=int64#8 # asm 2: xorq 176(<ptr=%rcx),<h11=%r10 xorq 176( % rcx), % r10 # qhasm: h11 ^= *(uint64 *) ( ptr + 168 ) # asm 1: xorq 168(<ptr=int64#4),<h11=int64#8 # asm 2: xorq 168(<ptr=%rcx),<h11=%r10 xorq 168( % rcx), % r10 # qhasm: mem64[ input_0 + 88 ] = h11 # asm 1: movq <h11=int64#8,88(<input_0=int64#1) # asm 2: movq <h11=%r10,88(<input_0=%rdi) movq % r10, 88( % rdi) # qhasm: h10 ^= *(uint64 *) ( ptr + 152 ) # asm 1: xorq 152(<ptr=int64#4),<h10=int64#2 # asm 2: xorq 152(<ptr=%rcx),<h10=%rsi xorq 152( % rcx), % rsi # qhasm: h10 ^= *(uint64 *) ( ptr + 160 ) # asm 1: xorq 160(<ptr=int64#4),<h10=int64#2 # asm 2: xorq 160(<ptr=%rcx),<h10=%rsi xorq 160( % rcx), % rsi # qhasm: mem64[ input_0 + 80 ] = h10 # asm 1: movq <h10=int64#2,80(<input_0=int64#1) # asm 2: movq <h10=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: h9 ^= *(uint64 *) ( ptr + 144 ) # asm 1: xorq 144(<ptr=int64#4),<h9=int64#5 # asm 2: xorq 144(<ptr=%rcx),<h9=%r8 xorq 144( % rcx), % r8 # qhasm: h9 ^= *(uint64 *) ( ptr + 136 ) # asm 1: xorq 136(<ptr=int64#4),<h9=int64#5 # asm 2: xorq 136(<ptr=%rcx),<h9=%r8 xorq 136( % rcx), % r8 # qhasm: mem64[ input_0 + 72 ] = h9 # asm 1: movq <h9=int64#5,72(<input_0=int64#1) # asm 2: movq <h9=%r8,72(<input_0=%rdi) movq % r8, 72( % rdi) # qhasm: h8 ^= *(uint64 *) ( ptr + 120 ) # asm 1: xorq 120(<ptr=int64#4),<h8=int64#7 # asm 2: xorq 120(<ptr=%rcx),<h8=%rax xorq 120( % rcx), % rax # qhasm: h8 ^= *(uint64 *) ( ptr + 128 ) # asm 1: xorq 128(<ptr=int64#4),<h8=int64#7 # asm 2: xorq 128(<ptr=%rcx),<h8=%rax xorq 128( % rcx), % rax # qhasm: mem64[ input_0 + 64 ] = h8 # asm 1: movq <h8=int64#7,64(<input_0=int64#1) # asm 2: movq <h8=%rax,64(<input_0=%rdi) movq % rax, 64( % rdi) # qhasm: h7 ^= *(uint64 *) ( ptr + 112 ) # asm 1: xorq 112(<ptr=int64#4),<h7=int64#9 # asm 2: xorq 112(<ptr=%rcx),<h7=%r11 xorq 112( % rcx), % r11 # qhasm: h7 ^= *(uint64 *) ( ptr + 104 ) # asm 1: xorq 104(<ptr=int64#4),<h7=int64#9 # asm 2: xorq 104(<ptr=%rcx),<h7=%r11 xorq 104( % rcx), % r11 # qhasm: mem64[ input_0 + 56 ] = h7 # asm 1: movq <h7=int64#9,56(<input_0=int64#1) # asm 2: movq <h7=%r11,56(<input_0=%rdi) movq % r11, 56( % rdi) # qhasm: h6 ^= *(uint64 *) ( ptr + 88 ) # asm 1: xorq 88(<ptr=int64#4),<h6=int64#10 # asm 2: xorq 88(<ptr=%rcx),<h6=%r12 xorq 88( % rcx), % r12 # qhasm: h6 ^= *(uint64 *) ( ptr + 96 ) # asm 1: xorq 96(<ptr=int64#4),<h6=int64#10 # asm 2: xorq 96(<ptr=%rcx),<h6=%r12 xorq 96( % rcx), % r12 
# qhasm: mem64[ input_0 + 48 ] = h6 # asm 1: movq <h6=int64#10,48(<input_0=int64#1) # asm 2: movq <h6=%r12,48(<input_0=%rdi) movq % r12, 48( % rdi) # qhasm: h5 ^= *(uint64 *) ( ptr + 80 ) # asm 1: xorq 80(<ptr=int64#4),<h5=int64#11 # asm 2: xorq 80(<ptr=%rcx),<h5=%r13 xorq 80( % rcx), % r13 # qhasm: h5 ^= *(uint64 *) ( ptr + 72 ) # asm 1: xorq 72(<ptr=int64#4),<h5=int64#11 # asm 2: xorq 72(<ptr=%rcx),<h5=%r13 xorq 72( % rcx), % r13 # qhasm: mem64[ input_0 + 40 ] = h5 # asm 1: movq <h5=int64#11,40(<input_0=int64#1) # asm 2: movq <h5=%r13,40(<input_0=%rdi) movq % r13, 40( % rdi) # qhasm: h4 ^= *(uint64 *) ( ptr + 56 ) # asm 1: xorq 56(<ptr=int64#4),<h4=int64#12 # asm 2: xorq 56(<ptr=%rcx),<h4=%r14 xorq 56( % rcx), % r14 # qhasm: h4 ^= *(uint64 *) ( ptr + 64 ) # asm 1: xorq 64(<ptr=int64#4),<h4=int64#12 # asm 2: xorq 64(<ptr=%rcx),<h4=%r14 xorq 64( % rcx), % r14 # qhasm: mem64[ input_0 + 32 ] = h4 # asm 1: movq <h4=int64#12,32(<input_0=int64#1) # asm 2: movq <h4=%r14,32(<input_0=%rdi) movq % r14, 32( % rdi) # qhasm: h3 ^= *(uint64 *) ( ptr + 48 ) # asm 1: xorq 48(<ptr=int64#4),<h3=int64#13 # asm 2: xorq 48(<ptr=%rcx),<h3=%r15 xorq 48( % rcx), % r15 # qhasm: h3 ^= *(uint64 *) ( ptr + 40 ) # asm 1: xorq 40(<ptr=int64#4),<h3=int64#13 # asm 2: xorq 40(<ptr=%rcx),<h3=%r15 xorq 40( % rcx), % r15 # qhasm: mem64[ input_0 + 24 ] = h3 # asm 1: movq <h3=int64#13,24(<input_0=int64#1) # asm 2: movq <h3=%r15,24(<input_0=%rdi) movq % r15, 24( % rdi) # qhasm: h2 ^= *(uint64 *) ( ptr + 24 ) # asm 1: xorq 24(<ptr=int64#4),<h2=int64#14 # asm 2: xorq 24(<ptr=%rcx),<h2=%rbx xorq 24( % rcx), % rbx # qhasm: h2 ^= *(uint64 *) ( ptr + 32 ) # asm 1: xorq 32(<ptr=int64#4),<h2=int64#14 # asm 2: xorq 32(<ptr=%rcx),<h2=%rbx xorq 32( % rcx), % rbx # qhasm: mem64[ input_0 + 16 ] = h2 # asm 1: movq <h2=int64#14,16(<input_0=int64#1) # asm 2: movq <h2=%rbx,16(<input_0=%rdi) movq % rbx, 16( % rdi) # qhasm: h1 ^= *(uint64 *) ( ptr + 16 ) # asm 1: xorq 16(<ptr=int64#4),<h1=int64#3 # asm 2: xorq 16(<ptr=%rcx),<h1=%rdx xorq 16( % rcx), % rdx # qhasm: h1 ^= *(uint64 *) ( ptr + 8 ) # asm 1: xorq 8(<ptr=int64#4),<h1=int64#3 # asm 2: xorq 8(<ptr=%rcx),<h1=%rdx xorq 8( % rcx), % rdx # qhasm: mem64[ input_0 + 8 ] = h1 # asm 1: movq <h1=int64#3,8(<input_0=int64#1) # asm 2: movq <h1=%rdx,8(<input_0=%rdi) movq % rdx, 8( % rdi) # qhasm: h0 ^= *(uint64 *) ( ptr + 0 ) # asm 1: xorq 0(<ptr=int64#4),<h0=int64#6 # asm 2: xorq 0(<ptr=%rcx),<h0=%r9 xorq 0( % rcx), % r9 # qhasm: mem64[ input_0 + 0 ] = h0 # asm 1: movq <h0=int64#6,0(<input_0=int64#1) # asm 2: movq <h0=%r9,0(<input_0=%rdi) movq % r9, 0( % rdi) # qhasm: caller_r11 = r11_stack # asm 1: movq <r11_stack=stack64#1,>caller_r11=int64#9 # asm 2: movq <r11_stack=608(%rsp),>caller_r11=%r11 movq 608( % rsp), % r11 # qhasm: caller_r12 = r12_stack # asm 1: movq <r12_stack=stack64#2,>caller_r12=int64#10 # asm 2: movq <r12_stack=616(%rsp),>caller_r12=%r12 movq 616( % rsp), % r12 # qhasm: caller_r13 = r13_stack # asm 1: movq <r13_stack=stack64#3,>caller_r13=int64#11 # asm 2: movq <r13_stack=624(%rsp),>caller_r13=%r13 movq 624( % rsp), % r13 # qhasm: caller_r14 = r14_stack # asm 1: movq <r14_stack=stack64#4,>caller_r14=int64#12 # asm 2: movq <r14_stack=632(%rsp),>caller_r14=%r14 movq 632( % rsp), % r14 # qhasm: caller_r15 = r15_stack # asm 1: movq <r15_stack=stack64#5,>caller_r15=int64#13 # asm 2: movq <r15_stack=640(%rsp),>caller_r15=%r15 movq 640( % rsp), % r15 # qhasm: caller_rbx = rbx_stack # asm 1: movq <rbx_stack=stack64#6,>caller_rbx=int64#14 # asm 2: movq <rbx_stack=648(%rsp),>caller_rbx=%rbx 
movq 648( % rsp), % rbx # qhasm: return add % r11, % rsp ret
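The h0..h22 XOR cascade that closes this routine is the bit-sliced reduction of the 23-word carryless product modulo the mceliece348864 field polynomial x^12 + x^3 + 1: word 12+k represents x^(12+k) = x^(3+k) + x^k, so it folds into words 3+k and k (hence h13 ^= h22 and h10 ^= h22 above, and likewise down through h12). A minimal C sketch of that step, assuming the bit-sliced layout in which word i carries bit i of 64 independent GF(2^12) elements; the name and signature below are illustrative, not pqclean's exported interface:

#include <stdint.h>

/* Sketch only (assumed layout): fold a 23-word bit-sliced product down
   to 12 words modulo x^12 + x^3 + 1.  Folding t[i] into t[i-12+3] and
   t[i-12] mirrors the h-register cascade in the assembly above
   (e.g. i = 22 gives t[13] ^= t[22] and t[10] ^= t[22]). */
static void reduce_sketch(uint64_t out[12], const uint64_t prod[23]) {
    uint64_t t[23];
    for (int i = 0; i < 23; i++) t[i] = prod[i];
    for (int i = 22; i >= 12; i--) {   /* descending, so each folded word is final */
        t[i - 12 + 3] ^= t[i];
        t[i - 12]     ^= t[i];
    }
    for (int i = 0; i < 12; i++) out[i] = t[i];
}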
mktmansour/MKT-KSA-Geolocation-Security
29,381
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864f/avx2/vec_mul_sp_asm.S
#include "namespace.h" #define vec_mul_sp_asm CRYPTO_NAMESPACE(vec_mul_sp_asm) #define _vec_mul_sp_asm _CRYPTO_NAMESPACE(vec_mul_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 s0 # qhasm: reg256 s1 # qhasm: reg256 s2 # qhasm: reg256 s3 # qhasm: reg256 s4 # qhasm: reg256 s5 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r # qhasm: int64 h0 # qhasm: int64 h1 # qhasm: int64 h2 # qhasm: int64 h3 # qhasm: int64 h4 # qhasm: int64 h5 # qhasm: int64 h6 # qhasm: int64 h7 # qhasm: int64 h8 # qhasm: int64 h9 # qhasm: int64 h10 # qhasm: int64 h11 # qhasm: int64 h12 # qhasm: int64 h13 # qhasm: int64 h14 # qhasm: int64 h15 # qhasm: int64 h16 # qhasm: int64 h17 # qhasm: int64 h18 # qhasm: int64 h19 # qhasm: int64 h20 # qhasm: int64 h21 # qhasm: int64 h22 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: stack64 r11_stack # qhasm: stack64 r12_stack # qhasm: stack64 r13_stack # qhasm: stack64 r14_stack # qhasm: stack64 r15_stack # qhasm: stack64 rbx_stack # qhasm: stack64 rbp_stack # qhasm: enter vec_mul_sp_asm .p2align 5 .global _vec_mul_sp_asm .global vec_mul_sp_asm _vec_mul_sp_asm: vec_mul_sp_asm: mov % rsp, % r11 and $31, % r11 add $672, % r11 sub % r11, % rsp # qhasm: r11_stack = caller_r11 # asm 1: movq <caller_r11=int64#9,>r11_stack=stack64#1 # asm 2: movq <caller_r11=%r11,>r11_stack=608(%rsp) movq % r11, 608( % rsp) # qhasm: r12_stack = caller_r12 # asm 1: movq <caller_r12=int64#10,>r12_stack=stack64#2 # asm 2: movq <caller_r12=%r12,>r12_stack=616(%rsp) movq % r12, 616( % rsp) # qhasm: r13_stack = caller_r13 # asm 1: movq <caller_r13=int64#11,>r13_stack=stack64#3 # asm 2: movq <caller_r13=%r13,>r13_stack=624(%rsp) movq % r13, 624( % rsp) # qhasm: r14_stack = caller_r14 # asm 1: movq <caller_r14=int64#12,>r14_stack=stack64#4 # asm 2: movq <caller_r14=%r14,>r14_stack=632(%rsp) movq % r14, 632( % rsp) # qhasm: r15_stack = caller_r15 # asm 1: movq <caller_r15=int64#13,>r15_stack=stack64#5 # asm 2: movq <caller_r15=%r15,>r15_stack=640(%rsp) movq % r15, 640( % rsp) # qhasm: rbx_stack = caller_rbx # asm 1: movq <caller_rbx=int64#14,>rbx_stack=stack64#6 # asm 2: movq <caller_rbx=%rbx,>rbx_stack=648(%rsp) movq % rbx, 648( % rsp) # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>ptr=%rcx leaq 0( % rsp), % rcx # qhasm: s0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>s0=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>s0=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: s1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>s1=reg256#2 # asm 2: vmovupd 
32(<input_1=%rsi),>s1=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: s2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>s2=reg256#3 # asm 2: vmovupd 64(<input_1=%rsi),>s2=%ymm2 vmovupd 64( % rsi), % ymm2 # qhasm: a5[0,1,2,3] = s2[2,2,3,3] # asm 1: vpermq $0xfa,<s2=reg256#3,>a5=reg256#4 # asm 2: vpermq $0xfa,<s2=%ymm2,>a5=%ymm3 vpermq $0xfa, % ymm2, % ymm3 # qhasm: r = mem256[ input_2 + 160 ] # asm 1: vmovupd 160(<input_2=int64#3),>r=reg256#5 # asm 2: vmovupd 160(<input_2=%rdx),>r=%ymm4 vmovupd 160( % rdx), % ymm4 # qhasm: b5[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#5,>b5=reg256#5 # asm 2: vpermq $0xdd,<r=%ymm4,>b5=%ymm4 vpermq $0xdd, % ymm4, % ymm4 # qhasm: r10 = a5 & b5 # asm 1: vpand <a5=reg256#4,<b5=reg256#5,>r10=reg256#6 # asm 2: vpand <a5=%ymm3,<b5=%ymm4,>r10=%ymm5 vpand % ymm3, % ymm4, % ymm5 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#6,320(<ptr=int64#4) # asm 2: vmovupd <r10=%ymm5,320(<ptr=%rcx) vmovupd % ymm5, 320( % rcx) # qhasm: r = mem256[ input_2 + 128 ] # asm 1: vmovupd 128(<input_2=int64#3),>r=reg256#6 # asm 2: vmovupd 128(<input_2=%rdx),>r=%ymm5 vmovupd 128( % rdx), % ymm5 # qhasm: b4[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#6,>b4=reg256#6 # asm 2: vpermq $0xdd,<r=%ymm5,>b4=%ymm5 vpermq $0xdd, % ymm5, % ymm5 # qhasm: r9 = a5 & b4 # asm 1: vpand <a5=reg256#4,<b4=reg256#6,>r9=reg256#7 # asm 2: vpand <a5=%ymm3,<b4=%ymm5,>r9=%ymm6 vpand % ymm3, % ymm5, % ymm6 # qhasm: r = mem256[ input_2 + 96 ] # asm 1: vmovupd 96(<input_2=int64#3),>r=reg256#8 # asm 2: vmovupd 96(<input_2=%rdx),>r=%ymm7 vmovupd 96( % rdx), % ymm7 # qhasm: b3[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#8,>b3=reg256#8 # asm 2: vpermq $0xdd,<r=%ymm7,>b3=%ymm7 vpermq $0xdd, % ymm7, % ymm7 # qhasm: r8 = a5 & b3 # asm 1: vpand <a5=reg256#4,<b3=reg256#8,>r8=reg256#9 # asm 2: vpand <a5=%ymm3,<b3=%ymm7,>r8=%ymm8 vpand % ymm3, % ymm7, % ymm8 # qhasm: r = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>r=reg256#10 # asm 2: vmovupd 64(<input_2=%rdx),>r=%ymm9 vmovupd 64( % rdx), % ymm9 # qhasm: b2[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#10,>b2=reg256#10 # asm 2: vpermq $0xdd,<r=%ymm9,>b2=%ymm9 vpermq $0xdd, % ymm9, % ymm9 # qhasm: r7 = a5 & b2 # asm 1: vpand <a5=reg256#4,<b2=reg256#10,>r7=reg256#11 # asm 2: vpand <a5=%ymm3,<b2=%ymm9,>r7=%ymm10 vpand % ymm3, % ymm9, % ymm10 # qhasm: r = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>r=reg256#12 # asm 2: vmovupd 32(<input_2=%rdx),>r=%ymm11 vmovupd 32( % rdx), % ymm11 # qhasm: b1[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#12,>b1=reg256#12 # asm 2: vpermq $0xdd,<r=%ymm11,>b1=%ymm11 vpermq $0xdd, % ymm11, % ymm11 # qhasm: r6 = a5 & b1 # asm 1: vpand <a5=reg256#4,<b1=reg256#12,>r6=reg256#13 # asm 2: vpand <a5=%ymm3,<b1=%ymm11,>r6=%ymm12 vpand % ymm3, % ymm11, % ymm12 # qhasm: r = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>r=reg256#14 # asm 2: vmovupd 0(<input_2=%rdx),>r=%ymm13 vmovupd 0( % rdx), % ymm13 # qhasm: b0[0,1,2,3] = r[1,3,1,3] # asm 1: vpermq $0xdd,<r=reg256#14,>b0=reg256#14 # asm 2: vpermq $0xdd,<r=%ymm13,>b0=%ymm13 vpermq $0xdd, % ymm13, % ymm13 # qhasm: r5 = a5 & b0 # asm 1: vpand <a5=reg256#4,<b0=reg256#14,>r5=reg256#4 # asm 2: vpand <a5=%ymm3,<b0=%ymm13,>r5=%ymm3 vpand % ymm3, % ymm13, % ymm3 # qhasm: a4[0,1,2,3] = s2[0,0,1,1] # asm 1: vpermq $0x50,<s2=reg256#3,>a4=reg256#3 # asm 2: vpermq $0x50,<s2=%ymm2,>a4=%ymm2 vpermq $0x50, % ymm2, % ymm2 # qhasm: r = a4 & b5 # asm 1: vpand <a4=reg256#3,<b5=reg256#5,>r=reg256#15 # asm 2: 
vpand <a4=%ymm2,<b5=%ymm4,>r=%ymm14 vpand % ymm2, % ymm4, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#7,<r9=reg256#7 # asm 2: vpxor <r=%ymm14,<r9=%ymm6,<r9=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#7,288(<ptr=int64#4) # asm 2: vmovupd <r9=%ymm6,288(<ptr=%rcx) vmovupd % ymm6, 288( % rcx) # qhasm: r = a4 & b4 # asm 1: vpand <a4=reg256#3,<b4=reg256#6,>r=reg256#7 # asm 2: vpand <a4=%ymm2,<b4=%ymm5,>r=%ymm6 vpand % ymm2, % ymm5, % ymm6 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#7,<r8=reg256#9,<r8=reg256#9 # asm 2: vpxor <r=%ymm6,<r8=%ymm8,<r8=%ymm8 vpxor % ymm6, % ymm8, % ymm8 # qhasm: r = a4 & b3 # asm 1: vpand <a4=reg256#3,<b3=reg256#8,>r=reg256#7 # asm 2: vpand <a4=%ymm2,<b3=%ymm7,>r=%ymm6 vpand % ymm2, % ymm7, % ymm6 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r = a4 & b2 # asm 1: vpand <a4=reg256#3,<b2=reg256#10,>r=reg256#7 # asm 2: vpand <a4=%ymm2,<b2=%ymm9,>r=%ymm6 vpand % ymm2, % ymm9, % ymm6 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#7,<r6=reg256#13,<r6=reg256#13 # asm 2: vpxor <r=%ymm6,<r6=%ymm12,<r6=%ymm12 vpxor % ymm6, % ymm12, % ymm12 # qhasm: r = a4 & b1 # asm 1: vpand <a4=reg256#3,<b1=reg256#12,>r=reg256#7 # asm 2: vpand <a4=%ymm2,<b1=%ymm11,>r=%ymm6 vpand % ymm2, % ymm11, % ymm6 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#7,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm6,<r5=%ymm3,<r5=%ymm3 vpxor % ymm6, % ymm3, % ymm3 # qhasm: r4 = a4 & b0 # asm 1: vpand <a4=reg256#3,<b0=reg256#14,>r4=reg256#3 # asm 2: vpand <a4=%ymm2,<b0=%ymm13,>r4=%ymm2 vpand % ymm2, % ymm13, % ymm2 # qhasm: a3[0,1,2,3] = s1[2,2,3,3] # asm 1: vpermq $0xfa,<s1=reg256#2,>a3=reg256#7 # asm 2: vpermq $0xfa,<s1=%ymm1,>a3=%ymm6 vpermq $0xfa, % ymm1, % ymm6 # qhasm: r = a3 & b5 # asm 1: vpand <a3=reg256#7,<b5=reg256#5,>r=reg256#15 # asm 2: vpand <a3=%ymm6,<b5=%ymm4,>r=%ymm14 vpand % ymm6, % ymm4, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#9,<r8=reg256#9 # asm 2: vpxor <r=%ymm14,<r8=%ymm8,<r8=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#9,256(<ptr=int64#4) # asm 2: vmovupd <r8=%ymm8,256(<ptr=%rcx) vmovupd % ymm8, 256( % rcx) # qhasm: r = a3 & b4 # asm 1: vpand <a3=reg256#7,<b4=reg256#6,>r=reg256#9 # asm 2: vpand <a3=%ymm6,<b4=%ymm5,>r=%ymm8 vpand % ymm6, % ymm5, % ymm8 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#9,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm8,<r7=%ymm10,<r7=%ymm10 vpxor % ymm8, % ymm10, % ymm10 # qhasm: r = a3 & b3 # asm 1: vpand <a3=reg256#7,<b3=reg256#8,>r=reg256#9 # asm 2: vpand <a3=%ymm6,<b3=%ymm7,>r=%ymm8 vpand % ymm6, % ymm7, % ymm8 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#9,<r6=reg256#13,<r6=reg256#13 # asm 2: vpxor <r=%ymm8,<r6=%ymm12,<r6=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r = a3 & b2 # asm 1: vpand <a3=reg256#7,<b2=reg256#10,>r=reg256#9 # asm 2: vpand <a3=%ymm6,<b2=%ymm9,>r=%ymm8 vpand % ymm6, % ymm9, % ymm8 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#9,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm8,<r5=%ymm3,<r5=%ymm3 vpxor % ymm8, % ymm3, % ymm3 # qhasm: r = a3 & b1 # asm 1: vpand <a3=reg256#7,<b1=reg256#12,>r=reg256#9 # asm 2: vpand <a3=%ymm6,<b1=%ymm11,>r=%ymm8 vpand % ymm6, % ymm11, % ymm8 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#9,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm8,<r4=%ymm2,<r4=%ymm2 vpxor % ymm8, % ymm2, % ymm2 # qhasm: r3 = a3 & b0 # asm 1: vpand <a3=reg256#7,<b0=reg256#14,>r3=reg256#7 # asm 
2: vpand <a3=%ymm6,<b0=%ymm13,>r3=%ymm6 vpand % ymm6, % ymm13, % ymm6 # qhasm: a2[0,1,2,3] = s1[0,0,1,1] # asm 1: vpermq $0x50,<s1=reg256#2,>a2=reg256#2 # asm 2: vpermq $0x50,<s1=%ymm1,>a2=%ymm1 vpermq $0x50, % ymm1, % ymm1 # qhasm: r = a2 & b5 # asm 1: vpand <a2=reg256#2,<b5=reg256#5,>r=reg256#9 # asm 2: vpand <a2=%ymm1,<b5=%ymm4,>r=%ymm8 vpand % ymm1, % ymm4, % ymm8 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#9,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm8,<r7=%ymm10,<r7=%ymm10 vpxor % ymm8, % ymm10, % ymm10 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#11,224(<ptr=int64#4) # asm 2: vmovupd <r7=%ymm10,224(<ptr=%rcx) vmovupd % ymm10, 224( % rcx) # qhasm: r = a2 & b4 # asm 1: vpand <a2=reg256#2,<b4=reg256#6,>r=reg256#9 # asm 2: vpand <a2=%ymm1,<b4=%ymm5,>r=%ymm8 vpand % ymm1, % ymm5, % ymm8 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#9,<r6=reg256#13,<r6=reg256#13 # asm 2: vpxor <r=%ymm8,<r6=%ymm12,<r6=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r = a2 & b3 # asm 1: vpand <a2=reg256#2,<b3=reg256#8,>r=reg256#9 # asm 2: vpand <a2=%ymm1,<b3=%ymm7,>r=%ymm8 vpand % ymm1, % ymm7, % ymm8 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#9,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm8,<r5=%ymm3,<r5=%ymm3 vpxor % ymm8, % ymm3, % ymm3 # qhasm: r = a2 & b2 # asm 1: vpand <a2=reg256#2,<b2=reg256#10,>r=reg256#9 # asm 2: vpand <a2=%ymm1,<b2=%ymm9,>r=%ymm8 vpand % ymm1, % ymm9, % ymm8 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#9,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm8,<r4=%ymm2,<r4=%ymm2 vpxor % ymm8, % ymm2, % ymm2 # qhasm: r = a2 & b1 # asm 1: vpand <a2=reg256#2,<b1=reg256#12,>r=reg256#9 # asm 2: vpand <a2=%ymm1,<b1=%ymm11,>r=%ymm8 vpand % ymm1, % ymm11, % ymm8 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#9,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm8,<r3=%ymm6,<r3=%ymm6 vpxor % ymm8, % ymm6, % ymm6 # qhasm: r2 = a2 & b0 # asm 1: vpand <a2=reg256#2,<b0=reg256#14,>r2=reg256#2 # asm 2: vpand <a2=%ymm1,<b0=%ymm13,>r2=%ymm1 vpand % ymm1, % ymm13, % ymm1 # qhasm: a1[0,1,2,3] = s0[2,2,3,3] # asm 1: vpermq $0xfa,<s0=reg256#1,>a1=reg256#9 # asm 2: vpermq $0xfa,<s0=%ymm0,>a1=%ymm8 vpermq $0xfa, % ymm0, % ymm8 # qhasm: r = a1 & b5 # asm 1: vpand <a1=reg256#9,<b5=reg256#5,>r=reg256#11 # asm 2: vpand <a1=%ymm8,<b5=%ymm4,>r=%ymm10 vpand % ymm8, % ymm4, % ymm10 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#11,<r6=reg256#13,<r6=reg256#13 # asm 2: vpxor <r=%ymm10,<r6=%ymm12,<r6=%ymm12 vpxor % ymm10, % ymm12, % ymm12 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#13,192(<ptr=int64#4) # asm 2: vmovupd <r6=%ymm12,192(<ptr=%rcx) vmovupd % ymm12, 192( % rcx) # qhasm: r = a1 & b4 # asm 1: vpand <a1=reg256#9,<b4=reg256#6,>r=reg256#11 # asm 2: vpand <a1=%ymm8,<b4=%ymm5,>r=%ymm10 vpand % ymm8, % ymm5, % ymm10 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#11,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm10,<r5=%ymm3,<r5=%ymm3 vpxor % ymm10, % ymm3, % ymm3 # qhasm: r = a1 & b3 # asm 1: vpand <a1=reg256#9,<b3=reg256#8,>r=reg256#11 # asm 2: vpand <a1=%ymm8,<b3=%ymm7,>r=%ymm10 vpand % ymm8, % ymm7, % ymm10 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#11,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm10,<r4=%ymm2,<r4=%ymm2 vpxor % ymm10, % ymm2, % ymm2 # qhasm: r = a1 & b2 # asm 1: vpand <a1=reg256#9,<b2=reg256#10,>r=reg256#11 # asm 2: vpand <a1=%ymm8,<b2=%ymm9,>r=%ymm10 vpand % ymm8, % ymm9, % ymm10 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#11,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm10,<r3=%ymm6,<r3=%ymm6 vpxor % ymm10, % ymm6, % ymm6 # qhasm: r = a1 & b1 # asm 1: vpand 
<a1=reg256#9,<b1=reg256#12,>r=reg256#11 # asm 2: vpand <a1=%ymm8,<b1=%ymm11,>r=%ymm10 vpand % ymm8, % ymm11, % ymm10 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#11,<r2=reg256#2,<r2=reg256#2 # asm 2: vpxor <r=%ymm10,<r2=%ymm1,<r2=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r1 = a1 & b0 # asm 1: vpand <a1=reg256#9,<b0=reg256#14,>r1=reg256#9 # asm 2: vpand <a1=%ymm8,<b0=%ymm13,>r1=%ymm8 vpand % ymm8, % ymm13, % ymm8 # qhasm: a0[0,1,2,3] = s0[0,0,1,1] # asm 1: vpermq $0x50,<s0=reg256#1,>a0=reg256#1 # asm 2: vpermq $0x50,<s0=%ymm0,>a0=%ymm0 vpermq $0x50, % ymm0, % ymm0 # qhasm: r = a0 & b5 # asm 1: vpand <a0=reg256#1,<b5=reg256#5,>r=reg256#5 # asm 2: vpand <a0=%ymm0,<b5=%ymm4,>r=%ymm4 vpand % ymm0, % ymm4, % ymm4 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#5,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm4,<r5=%ymm3,<r5=%ymm3 vpxor % ymm4, % ymm3, % ymm3 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#4) # asm 2: vmovupd <r5=%ymm3,160(<ptr=%rcx) vmovupd % ymm3, 160( % rcx) # qhasm: r = a0 & b4 # asm 1: vpand <a0=reg256#1,<b4=reg256#6,>r=reg256#4 # asm 2: vpand <a0=%ymm0,<b4=%ymm5,>r=%ymm3 vpand % ymm0, % ymm5, % ymm3 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#4,<r4=reg256#3,<r4=reg256#3 # asm 2: vpxor <r=%ymm3,<r4=%ymm2,<r4=%ymm2 vpxor % ymm3, % ymm2, % ymm2 # qhasm: r = a0 & b3 # asm 1: vpand <a0=reg256#1,<b3=reg256#8,>r=reg256#4 # asm 2: vpand <a0=%ymm0,<b3=%ymm7,>r=%ymm3 vpand % ymm0, % ymm7, % ymm3 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r = a0 & b2 # asm 1: vpand <a0=reg256#1,<b2=reg256#10,>r=reg256#4 # asm 2: vpand <a0=%ymm0,<b2=%ymm9,>r=%ymm3 vpand % ymm0, % ymm9, % ymm3 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#4,<r2=reg256#2,<r2=reg256#2 # asm 2: vpxor <r=%ymm3,<r2=%ymm1,<r2=%ymm1 vpxor % ymm3, % ymm1, % ymm1 # qhasm: r = a0 & b1 # asm 1: vpand <a0=reg256#1,<b1=reg256#12,>r=reg256#4 # asm 2: vpand <a0=%ymm0,<b1=%ymm11,>r=%ymm3 vpand % ymm0, % ymm11, % ymm3 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#4,<r1=reg256#9,<r1=reg256#9 # asm 2: vpxor <r=%ymm3,<r1=%ymm8,<r1=%ymm8 vpxor % ymm3, % ymm8, % ymm8 # qhasm: r0 = a0 & b0 # asm 1: vpand <a0=reg256#1,<b0=reg256#14,>r0=reg256#1 # asm 2: vpand <a0=%ymm0,<b0=%ymm13,>r0=%ymm0 vpand % ymm0, % ymm13, % ymm0 # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#3,128(<ptr=int64#4) # asm 2: vmovupd <r4=%ymm2,128(<ptr=%rcx) vmovupd % ymm2, 128( % rcx) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#7,96(<ptr=int64#4) # asm 2: vmovupd <r3=%ymm6,96(<ptr=%rcx) vmovupd % ymm6, 96( % rcx) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#2,64(<ptr=int64#4) # asm 2: vmovupd <r2=%ymm1,64(<ptr=%rcx) vmovupd % ymm1, 64( % rcx) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#9,32(<ptr=int64#4) # asm 2: vmovupd <r1=%ymm8,32(<ptr=%rcx) vmovupd % ymm8, 32( % rcx) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#4) # asm 2: vmovupd <r0=%ymm0,0(<ptr=%rcx) vmovupd % ymm0, 0( % rcx) # qhasm: h22 = mem64[ ptr + 344 ] # asm 1: movq 344(<ptr=int64#4),>h22=int64#2 # asm 2: movq 344(<ptr=%rcx),>h22=%rsi movq 344( % rcx), % rsi # qhasm: h13 = h22 # asm 1: mov <h22=int64#2,>h13=int64#3 # asm 2: mov <h22=%rsi,>h13=%rdx mov % rsi, % rdx # qhasm: h10 = h22 # asm 1: mov <h22=int64#2,>h10=int64#2 # asm 2: mov <h22=%rsi,>h10=%rsi mov % rsi, % rsi # qhasm: h21 = mem64[ ptr + 336 ] # asm 1: movq 336(<ptr=int64#4),>h21=int64#5 # asm 2: movq 336(<ptr=%rcx),>h21=%r8 movq 336( 
% rcx), % r8 # qhasm: h21 ^= *(uint64 *) ( ptr + 328 ) # asm 1: xorq 328(<ptr=int64#4),<h21=int64#5 # asm 2: xorq 328(<ptr=%rcx),<h21=%r8 xorq 328( % rcx), % r8 # qhasm: h12 = h21 # asm 1: mov <h21=int64#5,>h12=int64#6 # asm 2: mov <h21=%r8,>h12=%r9 mov % r8, % r9 # qhasm: h9 = h21 # asm 1: mov <h21=int64#5,>h9=int64#5 # asm 2: mov <h21=%r8,>h9=%r8 mov % r8, % r8 # qhasm: h20 = mem64[ ptr + 312 ] # asm 1: movq 312(<ptr=int64#4),>h20=int64#7 # asm 2: movq 312(<ptr=%rcx),>h20=%rax movq 312( % rcx), % rax # qhasm: h20 ^= *(uint64 *) ( ptr + 320 ) # asm 1: xorq 320(<ptr=int64#4),<h20=int64#7 # asm 2: xorq 320(<ptr=%rcx),<h20=%rax xorq 320( % rcx), % rax # qhasm: h11 = h20 # asm 1: mov <h20=int64#7,>h11=int64#8 # asm 2: mov <h20=%rax,>h11=%r10 mov % rax, % r10 # qhasm: h8 = h20 # asm 1: mov <h20=int64#7,>h8=int64#7 # asm 2: mov <h20=%rax,>h8=%rax mov % rax, % rax # qhasm: h19 = mem64[ ptr + 304 ] # asm 1: movq 304(<ptr=int64#4),>h19=int64#9 # asm 2: movq 304(<ptr=%rcx),>h19=%r11 movq 304( % rcx), % r11 # qhasm: h19 ^= *(uint64 *) ( ptr + 296 ) # asm 1: xorq 296(<ptr=int64#4),<h19=int64#9 # asm 2: xorq 296(<ptr=%rcx),<h19=%r11 xorq 296( % rcx), % r11 # qhasm: h10 ^= h19 # asm 1: xor <h19=int64#9,<h10=int64#2 # asm 2: xor <h19=%r11,<h10=%rsi xor % r11, % rsi # qhasm: h7 = h19 # asm 1: mov <h19=int64#9,>h7=int64#9 # asm 2: mov <h19=%r11,>h7=%r11 mov % r11, % r11 # qhasm: h18 = mem64[ ptr + 280 ] # asm 1: movq 280(<ptr=int64#4),>h18=int64#10 # asm 2: movq 280(<ptr=%rcx),>h18=%r12 movq 280( % rcx), % r12 # qhasm: h18 ^= *(uint64 *) ( ptr + 288 ) # asm 1: xorq 288(<ptr=int64#4),<h18=int64#10 # asm 2: xorq 288(<ptr=%rcx),<h18=%r12 xorq 288( % rcx), % r12 # qhasm: h9 ^= h18 # asm 1: xor <h18=int64#10,<h9=int64#5 # asm 2: xor <h18=%r12,<h9=%r8 xor % r12, % r8 # qhasm: h6 = h18 # asm 1: mov <h18=int64#10,>h6=int64#10 # asm 2: mov <h18=%r12,>h6=%r12 mov % r12, % r12 # qhasm: h17 = mem64[ ptr + 272 ] # asm 1: movq 272(<ptr=int64#4),>h17=int64#11 # asm 2: movq 272(<ptr=%rcx),>h17=%r13 movq 272( % rcx), % r13 # qhasm: h17 ^= *(uint64 *) ( ptr + 264 ) # asm 1: xorq 264(<ptr=int64#4),<h17=int64#11 # asm 2: xorq 264(<ptr=%rcx),<h17=%r13 xorq 264( % rcx), % r13 # qhasm: h8 ^= h17 # asm 1: xor <h17=int64#11,<h8=int64#7 # asm 2: xor <h17=%r13,<h8=%rax xor % r13, % rax # qhasm: h5 = h17 # asm 1: mov <h17=int64#11,>h5=int64#11 # asm 2: mov <h17=%r13,>h5=%r13 mov % r13, % r13 # qhasm: h16 = mem64[ ptr + 248 ] # asm 1: movq 248(<ptr=int64#4),>h16=int64#12 # asm 2: movq 248(<ptr=%rcx),>h16=%r14 movq 248( % rcx), % r14 # qhasm: h16 ^= *(uint64 *) ( ptr + 256 ) # asm 1: xorq 256(<ptr=int64#4),<h16=int64#12 # asm 2: xorq 256(<ptr=%rcx),<h16=%r14 xorq 256( % rcx), % r14 # qhasm: h7 ^= h16 # asm 1: xor <h16=int64#12,<h7=int64#9 # asm 2: xor <h16=%r14,<h7=%r11 xor % r14, % r11 # qhasm: h4 = h16 # asm 1: mov <h16=int64#12,>h4=int64#12 # asm 2: mov <h16=%r14,>h4=%r14 mov % r14, % r14 # qhasm: h15 = mem64[ ptr + 240 ] # asm 1: movq 240(<ptr=int64#4),>h15=int64#13 # asm 2: movq 240(<ptr=%rcx),>h15=%r15 movq 240( % rcx), % r15 # qhasm: h15 ^= *(uint64 *) ( ptr + 232 ) # asm 1: xorq 232(<ptr=int64#4),<h15=int64#13 # asm 2: xorq 232(<ptr=%rcx),<h15=%r15 xorq 232( % rcx), % r15 # qhasm: h6 ^= h15 # asm 1: xor <h15=int64#13,<h6=int64#10 # asm 2: xor <h15=%r15,<h6=%r12 xor % r15, % r12 # qhasm: h3 = h15 # asm 1: mov <h15=int64#13,>h3=int64#13 # asm 2: mov <h15=%r15,>h3=%r15 mov % r15, % r15 # qhasm: h14 = mem64[ ptr + 216 ] # asm 1: movq 216(<ptr=int64#4),>h14=int64#14 # asm 2: movq 216(<ptr=%rcx),>h14=%rbx movq 216( % rcx), % rbx # 
qhasm: h14 ^= *(uint64 *) ( ptr + 224 ) # asm 1: xorq 224(<ptr=int64#4),<h14=int64#14 # asm 2: xorq 224(<ptr=%rcx),<h14=%rbx xorq 224( % rcx), % rbx # qhasm: h5 ^= h14 # asm 1: xor <h14=int64#14,<h5=int64#11 # asm 2: xor <h14=%rbx,<h5=%r13 xor % rbx, % r13 # qhasm: h2 = h14 # asm 1: mov <h14=int64#14,>h2=int64#14 # asm 2: mov <h14=%rbx,>h2=%rbx mov % rbx, % rbx # qhasm: h13 ^= *(uint64 *) ( ptr + 208 ) # asm 1: xorq 208(<ptr=int64#4),<h13=int64#3 # asm 2: xorq 208(<ptr=%rcx),<h13=%rdx xorq 208( % rcx), % rdx # qhasm: h13 ^= *(uint64 *) ( ptr + 200 ) # asm 1: xorq 200(<ptr=int64#4),<h13=int64#3 # asm 2: xorq 200(<ptr=%rcx),<h13=%rdx xorq 200( % rcx), % rdx # qhasm: h4 ^= h13 # asm 1: xor <h13=int64#3,<h4=int64#12 # asm 2: xor <h13=%rdx,<h4=%r14 xor % rdx, % r14 # qhasm: h1 = h13 # asm 1: mov <h13=int64#3,>h1=int64#3 # asm 2: mov <h13=%rdx,>h1=%rdx mov % rdx, % rdx # qhasm: h12 ^= *(uint64 *) ( ptr + 184 ) # asm 1: xorq 184(<ptr=int64#4),<h12=int64#6 # asm 2: xorq 184(<ptr=%rcx),<h12=%r9 xorq 184( % rcx), % r9 # qhasm: h12 ^= *(uint64 *) ( ptr + 192 ) # asm 1: xorq 192(<ptr=int64#4),<h12=int64#6 # asm 2: xorq 192(<ptr=%rcx),<h12=%r9 xorq 192( % rcx), % r9 # qhasm: h3 ^= h12 # asm 1: xor <h12=int64#6,<h3=int64#13 # asm 2: xor <h12=%r9,<h3=%r15 xor % r9, % r15 # qhasm: h0 = h12 # asm 1: mov <h12=int64#6,>h0=int64#6 # asm 2: mov <h12=%r9,>h0=%r9 mov % r9, % r9 # qhasm: h11 ^= *(uint64 *) ( ptr + 176 ) # asm 1: xorq 176(<ptr=int64#4),<h11=int64#8 # asm 2: xorq 176(<ptr=%rcx),<h11=%r10 xorq 176( % rcx), % r10 # qhasm: h11 ^= *(uint64 *) ( ptr + 168 ) # asm 1: xorq 168(<ptr=int64#4),<h11=int64#8 # asm 2: xorq 168(<ptr=%rcx),<h11=%r10 xorq 168( % rcx), % r10 # qhasm: mem64[ input_0 + 88 ] = h11 # asm 1: movq <h11=int64#8,88(<input_0=int64#1) # asm 2: movq <h11=%r10,88(<input_0=%rdi) movq % r10, 88( % rdi) # qhasm: h10 ^= *(uint64 *) ( ptr + 152 ) # asm 1: xorq 152(<ptr=int64#4),<h10=int64#2 # asm 2: xorq 152(<ptr=%rcx),<h10=%rsi xorq 152( % rcx), % rsi # qhasm: h10 ^= *(uint64 *) ( ptr + 160 ) # asm 1: xorq 160(<ptr=int64#4),<h10=int64#2 # asm 2: xorq 160(<ptr=%rcx),<h10=%rsi xorq 160( % rcx), % rsi # qhasm: mem64[ input_0 + 80 ] = h10 # asm 1: movq <h10=int64#2,80(<input_0=int64#1) # asm 2: movq <h10=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: h9 ^= *(uint64 *) ( ptr + 144 ) # asm 1: xorq 144(<ptr=int64#4),<h9=int64#5 # asm 2: xorq 144(<ptr=%rcx),<h9=%r8 xorq 144( % rcx), % r8 # qhasm: h9 ^= *(uint64 *) ( ptr + 136 ) # asm 1: xorq 136(<ptr=int64#4),<h9=int64#5 # asm 2: xorq 136(<ptr=%rcx),<h9=%r8 xorq 136( % rcx), % r8 # qhasm: mem64[ input_0 + 72 ] = h9 # asm 1: movq <h9=int64#5,72(<input_0=int64#1) # asm 2: movq <h9=%r8,72(<input_0=%rdi) movq % r8, 72( % rdi) # qhasm: h8 ^= *(uint64 *) ( ptr + 120 ) # asm 1: xorq 120(<ptr=int64#4),<h8=int64#7 # asm 2: xorq 120(<ptr=%rcx),<h8=%rax xorq 120( % rcx), % rax # qhasm: h8 ^= *(uint64 *) ( ptr + 128 ) # asm 1: xorq 128(<ptr=int64#4),<h8=int64#7 # asm 2: xorq 128(<ptr=%rcx),<h8=%rax xorq 128( % rcx), % rax # qhasm: mem64[ input_0 + 64 ] = h8 # asm 1: movq <h8=int64#7,64(<input_0=int64#1) # asm 2: movq <h8=%rax,64(<input_0=%rdi) movq % rax, 64( % rdi) # qhasm: h7 ^= *(uint64 *) ( ptr + 112 ) # asm 1: xorq 112(<ptr=int64#4),<h7=int64#9 # asm 2: xorq 112(<ptr=%rcx),<h7=%r11 xorq 112( % rcx), % r11 # qhasm: h7 ^= *(uint64 *) ( ptr + 104 ) # asm 1: xorq 104(<ptr=int64#4),<h7=int64#9 # asm 2: xorq 104(<ptr=%rcx),<h7=%r11 xorq 104( % rcx), % r11 # qhasm: mem64[ input_0 + 56 ] = h7 # asm 1: movq <h7=int64#9,56(<input_0=int64#1) # asm 2: movq 
<h7=%r11,56(<input_0=%rdi) movq % r11, 56( % rdi) # qhasm: h6 ^= *(uint64 *) ( ptr + 88 ) # asm 1: xorq 88(<ptr=int64#4),<h6=int64#10 # asm 2: xorq 88(<ptr=%rcx),<h6=%r12 xorq 88( % rcx), % r12 # qhasm: h6 ^= *(uint64 *) ( ptr + 96 ) # asm 1: xorq 96(<ptr=int64#4),<h6=int64#10 # asm 2: xorq 96(<ptr=%rcx),<h6=%r12 xorq 96( % rcx), % r12 # qhasm: mem64[ input_0 + 48 ] = h6 # asm 1: movq <h6=int64#10,48(<input_0=int64#1) # asm 2: movq <h6=%r12,48(<input_0=%rdi) movq % r12, 48( % rdi) # qhasm: h5 ^= *(uint64 *) ( ptr + 80 ) # asm 1: xorq 80(<ptr=int64#4),<h5=int64#11 # asm 2: xorq 80(<ptr=%rcx),<h5=%r13 xorq 80( % rcx), % r13 # qhasm: h5 ^= *(uint64 *) ( ptr + 72 ) # asm 1: xorq 72(<ptr=int64#4),<h5=int64#11 # asm 2: xorq 72(<ptr=%rcx),<h5=%r13 xorq 72( % rcx), % r13 # qhasm: mem64[ input_0 + 40 ] = h5 # asm 1: movq <h5=int64#11,40(<input_0=int64#1) # asm 2: movq <h5=%r13,40(<input_0=%rdi) movq % r13, 40( % rdi) # qhasm: h4 ^= *(uint64 *) ( ptr + 56 ) # asm 1: xorq 56(<ptr=int64#4),<h4=int64#12 # asm 2: xorq 56(<ptr=%rcx),<h4=%r14 xorq 56( % rcx), % r14 # qhasm: h4 ^= *(uint64 *) ( ptr + 64 ) # asm 1: xorq 64(<ptr=int64#4),<h4=int64#12 # asm 2: xorq 64(<ptr=%rcx),<h4=%r14 xorq 64( % rcx), % r14 # qhasm: mem64[ input_0 + 32 ] = h4 # asm 1: movq <h4=int64#12,32(<input_0=int64#1) # asm 2: movq <h4=%r14,32(<input_0=%rdi) movq % r14, 32( % rdi) # qhasm: h3 ^= *(uint64 *) ( ptr + 48 ) # asm 1: xorq 48(<ptr=int64#4),<h3=int64#13 # asm 2: xorq 48(<ptr=%rcx),<h3=%r15 xorq 48( % rcx), % r15 # qhasm: h3 ^= *(uint64 *) ( ptr + 40 ) # asm 1: xorq 40(<ptr=int64#4),<h3=int64#13 # asm 2: xorq 40(<ptr=%rcx),<h3=%r15 xorq 40( % rcx), % r15 # qhasm: mem64[ input_0 + 24 ] = h3 # asm 1: movq <h3=int64#13,24(<input_0=int64#1) # asm 2: movq <h3=%r15,24(<input_0=%rdi) movq % r15, 24( % rdi) # qhasm: h2 ^= *(uint64 *) ( ptr + 24 ) # asm 1: xorq 24(<ptr=int64#4),<h2=int64#14 # asm 2: xorq 24(<ptr=%rcx),<h2=%rbx xorq 24( % rcx), % rbx # qhasm: h2 ^= *(uint64 *) ( ptr + 32 ) # asm 1: xorq 32(<ptr=int64#4),<h2=int64#14 # asm 2: xorq 32(<ptr=%rcx),<h2=%rbx xorq 32( % rcx), % rbx # qhasm: mem64[ input_0 + 16 ] = h2 # asm 1: movq <h2=int64#14,16(<input_0=int64#1) # asm 2: movq <h2=%rbx,16(<input_0=%rdi) movq % rbx, 16( % rdi) # qhasm: h1 ^= *(uint64 *) ( ptr + 16 ) # asm 1: xorq 16(<ptr=int64#4),<h1=int64#3 # asm 2: xorq 16(<ptr=%rcx),<h1=%rdx xorq 16( % rcx), % rdx # qhasm: h1 ^= *(uint64 *) ( ptr + 8 ) # asm 1: xorq 8(<ptr=int64#4),<h1=int64#3 # asm 2: xorq 8(<ptr=%rcx),<h1=%rdx xorq 8( % rcx), % rdx # qhasm: mem64[ input_0 + 8 ] = h1 # asm 1: movq <h1=int64#3,8(<input_0=int64#1) # asm 2: movq <h1=%rdx,8(<input_0=%rdi) movq % rdx, 8( % rdi) # qhasm: h0 ^= *(uint64 *) ( ptr + 0 ) # asm 1: xorq 0(<ptr=int64#4),<h0=int64#6 # asm 2: xorq 0(<ptr=%rcx),<h0=%r9 xorq 0( % rcx), % r9 # qhasm: mem64[ input_0 + 0 ] = h0 # asm 1: movq <h0=int64#6,0(<input_0=int64#1) # asm 2: movq <h0=%r9,0(<input_0=%rdi) movq % r9, 0( % rdi) # qhasm: caller_r11 = r11_stack # asm 1: movq <r11_stack=stack64#1,>caller_r11=int64#9 # asm 2: movq <r11_stack=608(%rsp),>caller_r11=%r11 movq 608( % rsp), % r11 # qhasm: caller_r12 = r12_stack # asm 1: movq <r12_stack=stack64#2,>caller_r12=int64#10 # asm 2: movq <r12_stack=616(%rsp),>caller_r12=%r12 movq 616( % rsp), % r12 # qhasm: caller_r13 = r13_stack # asm 1: movq <r13_stack=stack64#3,>caller_r13=int64#11 # asm 2: movq <r13_stack=624(%rsp),>caller_r13=%r13 movq 624( % rsp), % r13 # qhasm: caller_r14 = r14_stack # asm 1: movq <r14_stack=stack64#4,>caller_r14=int64#12 # asm 2: movq 
<r14_stack=632(%rsp),>caller_r14=%r14 movq 632( % rsp), % r14 # qhasm: caller_r15 = r15_stack # asm 1: movq <r15_stack=stack64#5,>caller_r15=int64#13 # asm 2: movq <r15_stack=640(%rsp),>caller_r15=%r15 movq 640( % rsp), % r15 # qhasm: caller_rbx = rbx_stack # asm 1: movq <rbx_stack=stack64#6,>caller_rbx=int64#14 # asm 2: movq <rbx_stack=648(%rsp),>caller_rbx=%rbx movq 648( % rsp), % rbx # qhasm: return add % r11, % rsp ret
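Before that reduction, both vec_mul_sp_asm variants build the 23-word product with the same vpand/vpxor schoolbook pattern: vpermq duplicates one 64-bit slice of the first operand across a ymm register, vpand forms a partial product of 64 element pairs at once, and vpxor accumulates it into the running r words. In the plain 64-bit bit-sliced view this collapses to r[i+j] ^= a[i] & b[j]; a minimal sketch under the same assumed layout as above (illustrative names, not the crate's API):

#include <stdint.h>

/* Sketch only: carryless (GF(2)) schoolbook product of two bit-sliced
   GF(2^12) operands.  The 23-word result is what the buffer at ptr
   holds, and it feeds the reduction sketched after the previous file. */
static void mul_sketch(uint64_t r[23], const uint64_t a[12], const uint64_t b[12]) {
    for (int i = 0; i < 23; i++) r[i] = 0;
    for (int i = 0; i < 12; i++)
        for (int j = 0; j < 12; j++)
            r[i + j] ^= a[i] & b[j];   /* AND = partial product, XOR = GF(2) add */
}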
mktmansour/MKT-KSA-Geolocation-Security
262,634
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864f/avx2/transpose_64x64_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x64_asm CRYPTO_NAMESPACE(transpose_64x64_asm) #define _transpose_64x64_asm _CRYPTO_NAMESPACE(transpose_64x64_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 r0 # qhasm: reg128 r1 # qhasm: reg128 r2 # qhasm: reg128 r3 # qhasm: reg128 r4 # qhasm: reg128 r5 # qhasm: reg128 r6 # qhasm: reg128 r7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: int64 buf # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x64_asm .p2align 5 .global _transpose_64x64_asm .global transpose_64x64_asm _transpose_64x64_asm: transpose_64x64_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 64 ] x2 # asm 1: movddup 64(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 64(<input_0=%rdi),>r1=%xmm7 movddup 64( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 
128(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 128(<input_0=%rdi),>r2=%xmm8 movddup 128( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 192(<input_0=%rdi),>r3=%xmm9 movddup 192( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 256(<input_0=%rdi),>r4=%xmm10 movddup 256( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 320(<input_0=%rdi),>r5=%xmm11 movddup 320( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 384(<input_0=%rdi),>r6=%xmm12 movddup 384( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 448(<input_0=%rdi),>r7=%xmm13 movddup 448( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor 
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, %xmm13, %xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, %xmm11, %xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, %xmm14, %xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, %xmm12, %xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, %xmm8, %xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8, %xmm14, %xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8, %xmm13, %xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8, %xmm10, %xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8, %xmm8, %xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8, %xmm12, %xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0, %xmm9, %rsi

# qhasm: mem64[ input_0 + 0 ] = buf
# asm 1: movq <buf=int64#2,0(<input_0=int64#1)
# asm 2: movq <buf=%rsi,0(<input_0=%rdi)
movq %rsi, 0(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0, %xmm13, %rsi

# qhasm: mem64[ input_0 + 64 ] = buf
# asm 1: movq <buf=int64#2,64(<input_0=int64#1)
# asm 2: movq <buf=%rsi,64(<input_0=%rdi)
movq %rsi, 64(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0, %xmm14, %rsi

# qhasm: mem64[ input_0 + 128 ] = buf
# asm 1: movq <buf=int64#2,128(<input_0=int64#1)
# asm 2: movq <buf=%rsi,128(<input_0=%rdi)
movq %rsi, 128(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0, %xmm10, %rsi

# qhasm: mem64[ input_0 + 192 ] = buf
# asm 1: movq <buf=int64#2,192(<input_0=int64#1)
# asm 2: movq <buf=%rsi,192(<input_0=%rdi)
movq %rsi, 192(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0, %xmm11, %rsi

# qhasm: mem64[ input_0 + 256 ] = buf
# asm 1: movq <buf=int64#2,256(<input_0=int64#1)
# asm 2: movq <buf=%rsi,256(<input_0=%rdi)
movq %rsi, 256(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0, %xmm8, %rsi

# qhasm: mem64[ input_0 + 320 ] = buf
# asm 1: movq <buf=int64#2,320(<input_0=int64#1)
# asm 2: movq <buf=%rsi,320(<input_0=%rdi)
movq %rsi, 320(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0, %xmm12, %rsi

# qhasm: mem64[ input_0 + 384 ] = buf
# asm 1: movq <buf=int64#2,384(<input_0=int64#1)
# asm 2: movq <buf=%rsi,384(<input_0=%rdi)
movq %rsi, 384(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0, %xmm6, %rsi

# qhasm: mem64[ input_0 + 448 ] = buf
# asm 1: movq <buf=int64#2,448(<input_0=int64#1)
# asm 2: movq <buf=%rsi,448(<input_0=%rdi)
movq %rsi, 448(%rdi)
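#
# editor's note -- the commentary here is added for readability and is not
# part of the qhasm-generated output. Each block in this routine applies
# the same three masked-shift exchange rounds (widths 32, 16, 8, using the
# mask pairs mask0/mask1, mask2/mask3, mask4/mask5) to eight 64-bit rows
# spaced 64 bytes apart, then writes the low qwords back. A rough C sketch
# of one exchange step, with hypothetical names, for orientation only:
#
#     v00 = lo & mask_lo;    /* vpand                  */
#     v10 = hi << s;         /* vpsllq / vpslld / vpsllw */
#     v01 = lo >> s;         /* vpsrlq / vpsrld / vpsrlw */
#     v11 = hi & mask_hi;    /* vpand                  */
#     lo  = v00 | v10;       /* vpor                   */
#     hi  = v01 | v11;       /* vpor                   */
#
# The next block processes the rows at byte offsets 8, 72, ..., 456.
#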
# qhasm: r0 = mem64[ input_0 + 8 ] x2
# asm 1: movddup 8(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 8(<input_0=%rdi),>r0=%xmm6
movddup 8(%rdi), %xmm6

# qhasm: r1 = mem64[ input_0 + 72 ] x2
# asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7
movddup 72(%rdi), %xmm7

# qhasm: r2 = mem64[ input_0 + 136 ] x2
# asm 1: movddup 136(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 136(<input_0=%rdi),>r2=%xmm8
movddup 136(%rdi), %xmm8

# qhasm: r3 = mem64[ input_0 + 200 ] x2
# asm 1: movddup 200(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 200(<input_0=%rdi),>r3=%xmm9
movddup 200(%rdi), %xmm9

# qhasm: r4 = mem64[ input_0 + 264 ] x2
# asm 1: movddup 264(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 264(<input_0=%rdi),>r4=%xmm10
movddup 264(%rdi), %xmm10

# qhasm: r5 = mem64[ input_0 + 328 ] x2
# asm 1: movddup 328(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 328(<input_0=%rdi),>r5=%xmm11
movddup 328(%rdi), %xmm11

# qhasm: r6 = mem64[ input_0 + 392 ] x2
# asm 1: movddup 392(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 392(<input_0=%rdi),>r6=%xmm12
movddup 392(%rdi), %xmm12

# qhasm: r7 = mem64[ input_0 + 456 ] x2
# asm 1: movddup 456(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 456(<input_0=%rdi),>r7=%xmm13
movddup 456(%rdi), %xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32, %xmm10, %xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32, %xmm6, %xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32, %xmm11, %xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32, %xmm7, %xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15, %xmm10, %xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32, %xmm12, %xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32, %xmm8, %xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, %xmm13, %xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, %xmm11, %xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, %xmm14, %xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, %xmm12, %xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, %xmm8, %xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8, %xmm14, %xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8, %xmm13, %xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8, %xmm10, %xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8, %xmm8, %xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8, %xmm12, %xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0, %xmm9, %rsi

# qhasm: mem64[ input_0 + 8 ] = buf
# asm 1: movq <buf=int64#2,8(<input_0=int64#1)
# asm 2: movq <buf=%rsi,8(<input_0=%rdi)
movq %rsi, 8(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0, %xmm13, %rsi

# qhasm: mem64[ input_0 + 72 ] = buf
# asm 1: movq <buf=int64#2,72(<input_0=int64#1)
# asm 2: movq <buf=%rsi,72(<input_0=%rdi)
movq %rsi, 72(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0, %xmm14, %rsi

# qhasm: mem64[ input_0 + 136 ] = buf
# asm 1: movq <buf=int64#2,136(<input_0=int64#1)
# asm 2: movq <buf=%rsi,136(<input_0=%rdi)
movq %rsi, 136(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0, %xmm10, %rsi

# qhasm: mem64[ input_0 + 200 ] = buf
# asm 1: movq <buf=int64#2,200(<input_0=int64#1)
# asm 2: movq <buf=%rsi,200(<input_0=%rdi)
movq %rsi, 200(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0, %xmm11, %rsi

# qhasm: mem64[ input_0 + 264 ] = buf
# asm 1: movq <buf=int64#2,264(<input_0=int64#1)
# asm 2: movq <buf=%rsi,264(<input_0=%rdi)
movq %rsi, 264(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0, %xmm8, %rsi

# qhasm: mem64[ input_0 + 328 ] = buf
# asm 1: movq <buf=int64#2,328(<input_0=int64#1)
# asm 2: movq <buf=%rsi,328(<input_0=%rdi)
movq %rsi, 328(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0, %xmm12, %rsi

# qhasm: mem64[ input_0 + 392 ] = buf
# asm 1: movq <buf=int64#2,392(<input_0=int64#1)
# asm 2: movq <buf=%rsi,392(<input_0=%rdi)
movq %rsi, 392(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0, %xmm6, %rsi

# qhasm: mem64[ input_0 + 456 ] = buf
# asm 1: movq <buf=int64#2,456(<input_0=int64#1)
# asm 2: movq <buf=%rsi,456(<input_0=%rdi)
movq %rsi, 456(%rdi)
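#
# editor's note: same masked-shift exchange network as sketched above,
# applied to the rows at byte offsets 16, 80, ..., 464.
#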
# qhasm: r0 = mem64[ input_0 + 16 ] x2
# asm 1: movddup 16(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 16(<input_0=%rdi),>r0=%xmm6
movddup 16(%rdi), %xmm6

# qhasm: r1 = mem64[ input_0 + 80 ] x2
# asm 1: movddup 80(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 80(<input_0=%rdi),>r1=%xmm7
movddup 80(%rdi), %xmm7

# qhasm: r2 = mem64[ input_0 + 144 ] x2
# asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8
movddup 144(%rdi), %xmm8

# qhasm: r3 = mem64[ input_0 + 208 ] x2
# asm 1: movddup 208(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 208(<input_0=%rdi),>r3=%xmm9
movddup 208(%rdi), %xmm9

# qhasm: r4 = mem64[ input_0 + 272 ] x2
# asm 1: movddup 272(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 272(<input_0=%rdi),>r4=%xmm10
movddup 272(%rdi), %xmm10

# qhasm: r5 = mem64[ input_0 + 336 ] x2
# asm 1: movddup 336(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 336(<input_0=%rdi),>r5=%xmm11
movddup 336(%rdi), %xmm11

# qhasm: r6 = mem64[ input_0 + 400 ] x2
# asm 1: movddup 400(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 400(<input_0=%rdi),>r6=%xmm12
movddup 400(%rdi), %xmm12

# qhasm: r7 = mem64[ input_0 + 464 ] x2
# asm 1: movddup 464(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 464(<input_0=%rdi),>r7=%xmm13
movddup 464(%rdi), %xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32, %xmm10, %xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32, %xmm6, %xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32, %xmm11, %xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32, %xmm7, %xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15, %xmm10, %xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32, %xmm12, %xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32, %xmm8, %xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, %xmm13, %xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, %xmm11, %xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, %xmm14, %xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, %xmm12, %xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, %xmm8, %xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8, %xmm14, %xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8, %xmm13, %xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8, %xmm10, %xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8, %xmm8, %xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8, %xmm12, %xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0, %xmm9, %rsi

# qhasm: mem64[ input_0 + 16 ] = buf
# asm 1: movq <buf=int64#2,16(<input_0=int64#1)
# asm 2: movq <buf=%rsi,16(<input_0=%rdi)
movq %rsi, 16(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0, %xmm13, %rsi

# qhasm: mem64[ input_0 + 80 ] = buf
# asm 1: movq <buf=int64#2,80(<input_0=int64#1)
# asm 2: movq <buf=%rsi,80(<input_0=%rdi)
movq %rsi, 80(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0, %xmm14, %rsi

# qhasm: mem64[ input_0 + 144 ] = buf
# asm 1: movq <buf=int64#2,144(<input_0=int64#1)
# asm 2: movq <buf=%rsi,144(<input_0=%rdi)
movq %rsi, 144(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0, %xmm10, %rsi

# qhasm: mem64[ input_0 + 208 ] = buf
# asm 1: movq <buf=int64#2,208(<input_0=int64#1)
# asm 2: movq <buf=%rsi,208(<input_0=%rdi)
movq %rsi, 208(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0, %xmm11, %rsi

# qhasm: mem64[ input_0 + 272 ] = buf
# asm 1: movq <buf=int64#2,272(<input_0=int64#1)
# asm 2: movq <buf=%rsi,272(<input_0=%rdi)
movq %rsi, 272(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0, %xmm8, %rsi

# qhasm: mem64[ input_0 + 336 ] = buf
# asm 1: movq <buf=int64#2,336(<input_0=int64#1)
# asm 2: movq <buf=%rsi,336(<input_0=%rdi)
movq %rsi, 336(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0, %xmm12, %rsi

# qhasm: mem64[ input_0 + 400 ] = buf
# asm 1: movq <buf=int64#2,400(<input_0=int64#1)
# asm 2: movq <buf=%rsi,400(<input_0=%rdi)
movq %rsi, 400(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0, %xmm6, %rsi

# qhasm: mem64[ input_0 + 464 ] = buf
# asm 1: movq <buf=int64#2,464(<input_0=int64#1)
# asm 2: movq <buf=%rsi,464(<input_0=%rdi)
movq %rsi, 464(%rdi)
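#
# editor's note: same masked-shift exchange network as sketched above,
# applied to the rows at byte offsets 24, 88, ..., 472.
#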
# qhasm: r0 = mem64[ input_0 + 24 ] x2
# asm 1: movddup 24(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 24(<input_0=%rdi),>r0=%xmm6
movddup 24(%rdi), %xmm6

# qhasm: r1 = mem64[ input_0 + 88 ] x2
# asm 1: movddup 88(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 88(<input_0=%rdi),>r1=%xmm7
movddup 88(%rdi), %xmm7

# qhasm: r2 = mem64[ input_0 + 152 ] x2
# asm 1: movddup 152(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 152(<input_0=%rdi),>r2=%xmm8
movddup 152(%rdi), %xmm8

# qhasm: r3 = mem64[ input_0 + 216 ] x2
# asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9
movddup 216(%rdi), %xmm9

# qhasm: r4 = mem64[ input_0 + 280 ] x2
# asm 1: movddup 280(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 280(<input_0=%rdi),>r4=%xmm10
movddup 280(%rdi), %xmm10

# qhasm: r5 = mem64[ input_0 + 344 ] x2
# asm 1: movddup 344(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 344(<input_0=%rdi),>r5=%xmm11
movddup 344(%rdi), %xmm11

# qhasm: r6 = mem64[ input_0 + 408 ] x2
# asm 1: movddup 408(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 408(<input_0=%rdi),>r6=%xmm12
movddup 408(%rdi), %xmm12

# qhasm: r7 = mem64[ input_0 + 472 ] x2
# asm 1: movddup 472(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 472(<input_0=%rdi),>r7=%xmm13
movddup 472(%rdi), %xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32, %xmm10, %xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32, %xmm6, %xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32, %xmm11, %xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32, %xmm7, %xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15, %xmm10, %xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32, %xmm12, %xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32, %xmm8, %xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, %xmm13, %xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, %xmm11, %xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, %xmm14, %xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, %xmm12, %xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, %xmm8, %xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8, %xmm14, %xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8, %xmm13, %xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8, %xmm10, %xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8, %xmm8, %xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8, %xmm12, %xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0, %xmm9, %rsi

# qhasm: mem64[ input_0 + 24 ] = buf
# asm 1: movq <buf=int64#2,24(<input_0=int64#1)
# asm 2: movq <buf=%rsi,24(<input_0=%rdi)
movq %rsi, 24(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0, %xmm13, %rsi

# qhasm: mem64[ input_0 + 88 ] = buf
# asm 1: movq <buf=int64#2,88(<input_0=int64#1)
# asm 2: movq <buf=%rsi,88(<input_0=%rdi)
movq %rsi, 88(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0, %xmm14, %rsi

# qhasm: mem64[ input_0 + 152 ] = buf
# asm 1: movq <buf=int64#2,152(<input_0=int64#1)
# asm 2: movq <buf=%rsi,152(<input_0=%rdi)
movq %rsi, 152(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0, %xmm10, %rsi

# qhasm: mem64[ input_0 + 216 ] = buf
# asm 1: movq <buf=int64#2,216(<input_0=int64#1)
# asm 2: movq <buf=%rsi,216(<input_0=%rdi)
movq %rsi, 216(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0, %xmm11, %rsi

# qhasm: mem64[ input_0 + 280 ] = buf
# asm 1: movq <buf=int64#2,280(<input_0=int64#1)
# asm 2: movq <buf=%rsi,280(<input_0=%rdi)
movq %rsi, 280(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0, %xmm8, %rsi

# qhasm: mem64[ input_0 + 344 ] = buf
# asm 1: movq <buf=int64#2,344(<input_0=int64#1)
# asm 2: movq <buf=%rsi,344(<input_0=%rdi)
movq %rsi, 344(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0, %xmm12, %rsi

# qhasm: mem64[ input_0 + 408 ] = buf
# asm 1: movq <buf=int64#2,408(<input_0=int64#1)
# asm 2: movq <buf=%rsi,408(<input_0=%rdi)
movq %rsi, 408(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0, %xmm6, %rsi

# qhasm: mem64[ input_0 + 472 ] = buf
# asm 1: movq <buf=int64#2,472(<input_0=int64#1)
# asm 2: movq <buf=%rsi,472(<input_0=%rdi)
movq %rsi, 472(%rdi)
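#
# editor's note: same masked-shift exchange network as sketched above,
# applied to the rows at byte offsets 32, 96, ..., 480.
#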
mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 224(<input_0=%rdi),>r3=%xmm9 movddup 224( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 352(<input_0=%rdi),>r5=%xmm11 movddup 352( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 416(<input_0=%rdi),>r6=%xmm12 movddup 416( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 480(<input_0=%rdi),>r7=%xmm13 movddup 480( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: 
vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 
# qhasm: v11 = r6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: 4x v10 = r7 << 16
vpslld $16, %xmm9, %xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: r5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: 8x v10 = r1 << 8
vpsllw $8, %xmm14, %xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8, %xmm13, %xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = r2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: 8x v10 = r3 << 8
vpsllw $8, %xmm10, %xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8, %xmm11, %xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = r4 & mask4
vpand %xmm4, %xmm12, %xmm11
# qhasm: 8x v10 = r5 << 8
vpsllw $8, %xmm8, %xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8, %xmm12, %xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = r6 & mask4
vpand %xmm4, %xmm6, %xmm12
# qhasm: 8x v10 = r7 << 8
vpsllw $8, %xmm7, %xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8, %xmm6, %xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5, %xmm7, %xmm7
# qhasm: r6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6

# qhasm: buf = r0[0]
pextrq $0x0, %xmm9, %rsi
# qhasm: mem64[ input_0 + 32 ] = buf
movq %rsi, 32(%rdi)
# qhasm: buf = r1[0]
pextrq $0x0, %xmm13, %rsi
# qhasm: mem64[ input_0 + 96 ] = buf
movq %rsi, 96(%rdi)
# qhasm: buf = r2[0]
pextrq $0x0, %xmm14, %rsi
# qhasm: mem64[ input_0 + 160 ] = buf
movq %rsi, 160(%rdi)
# qhasm: buf = r3[0]
pextrq $0x0, %xmm10, %rsi
# qhasm: mem64[ input_0 + 224 ] = buf
movq %rsi, 224(%rdi)
# qhasm: buf = r4[0]
pextrq $0x0, %xmm11, %rsi
# qhasm: mem64[ input_0 + 288 ] = buf
movq %rsi, 288(%rdi)
# qhasm: buf = r5[0]
pextrq $0x0, %xmm8, %rsi
# qhasm: mem64[ input_0 + 352 ] = buf
movq %rsi, 352(%rdi)
# qhasm: buf = r6[0]
pextrq $0x0, %xmm12, %rsi
# qhasm: mem64[ input_0 + 416 ] = buf
movq %rsi, 416(%rdi)
# qhasm: buf = r7[0]
pextrq $0x0, %xmm6, %rsi
# qhasm: mem64[ input_0 + 480 ] = buf
movq %rsi, 480(%rdi)
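
# The offset-32 slice is done: its interleaved low quadwords were written
# back at offsets 32, 96, ..., 480 (row stride 64). The same
# load/interleave/store sequence now repeats for the slice at offsets
# 40, 104, ..., 488.
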
# qhasm: r0 = mem64[ input_0 + 40 ] x2
movddup 40(%rdi), %xmm6
# qhasm: r1 = mem64[ input_0 + 104 ] x2
movddup 104(%rdi), %xmm7
# qhasm: r2 = mem64[ input_0 + 168 ] x2
movddup 168(%rdi), %xmm8
# qhasm: r3 = mem64[ input_0 + 232 ] x2
movddup 232(%rdi), %xmm9
# qhasm: r4 = mem64[ input_0 + 296 ] x2
movddup 296(%rdi), %xmm10
# qhasm: r5 = mem64[ input_0 + 360 ] x2
movddup 360(%rdi), %xmm11
# qhasm: r6 = mem64[ input_0 + 424 ] x2
movddup 424(%rdi), %xmm12
# qhasm: r7 = mem64[ input_0 + 488 ] x2
movddup 488(%rdi), %xmm13

# qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: 2x v10 = r4 << 32
vpsllq $32, %xmm10, %xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
vpsrlq $32, %xmm6, %xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: 2x v10 = r5 << 32
vpsllq $32, %xmm11, %xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
vpsrlq $32, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: 2x v10 = r6 << 32
vpsllq $32, %xmm12, %xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
vpsrlq $32, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm9, %xmm12
# qhasm: 2x v10 = r7 << 32
vpsllq $32, %xmm13, %xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
vpsrlq $32, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1, %xmm13, %xmm13
# qhasm: r3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: 4x v10 = r2 << 16
vpslld $16, %xmm11, %xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
vpsrld $16, %xmm14, %xmm14
# qhasm: v11 = r2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: r2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: 4x v10 = r3 << 16
vpslld $16, %xmm12, %xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
vpsrld $16, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: 4x v10 = r6 << 16
vpslld $16, %xmm8, %xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
vpsrld $16, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: 4x v10 = r7 << 16
vpslld $16, %xmm9, %xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: r5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: 8x v10 = r1 << 8
vpsllw $8, %xmm14, %xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8, %xmm13, %xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = r2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: 8x v10 = r3 << 8
vpsllw $8, %xmm10, %xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8, %xmm11, %xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = r4 & mask4
vpand %xmm4, %xmm12, %xmm11
# qhasm: 8x v10 = r5 << 8
vpsllw $8, %xmm8, %xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8, %xmm12, %xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = r6 & mask4
vpand %xmm4, %xmm6, %xmm12
# qhasm: 8x v10 = r7 << 8
vpsllw $8, %xmm7, %xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8, %xmm6, %xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5, %xmm7, %xmm7
# qhasm: r6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6

# qhasm: buf = r0[0]
pextrq $0x0, %xmm9, %rsi
# qhasm: mem64[ input_0 + 40 ] = buf
movq %rsi, 40(%rdi)
# qhasm: buf = r1[0]
pextrq $0x0, %xmm13, %rsi
# qhasm: mem64[ input_0 + 104 ] = buf
movq %rsi, 104(%rdi)
# qhasm: buf = r2[0]
pextrq $0x0, %xmm14, %rsi
# qhasm: mem64[ input_0 + 168 ] = buf
movq %rsi, 168(%rdi)
# qhasm: buf = r3[0]
pextrq $0x0, %xmm10, %rsi
# qhasm: mem64[ input_0 + 232 ] = buf
movq %rsi, 232(%rdi)
# qhasm: buf = r4[0]
pextrq $0x0, %xmm11, %rsi
# qhasm: mem64[ input_0 + 296 ] = buf
movq %rsi, 296(%rdi)
# qhasm: buf = r5[0]
pextrq $0x0, %xmm8, %rsi
# qhasm: mem64[ input_0 + 360 ] = buf
movq %rsi, 360(%rdi)
# qhasm: buf = r6[0]
pextrq $0x0, %xmm12, %rsi
# qhasm: mem64[ input_0 + 424 ] = buf
movq %rsi, 424(%rdi)
# qhasm: buf = r7[0]
pextrq $0x0, %xmm6, %rsi
# qhasm: mem64[ input_0 + 488 ] = buf
movq %rsi, 488(%rdi)
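
# Same interleave network, applied next to the offset-48 slice
# (offsets 48, 112, ..., 496).
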
# qhasm: r0 = mem64[ input_0 + 48 ] x2
movddup 48(%rdi), %xmm6
# qhasm: r1 = mem64[ input_0 + 112 ] x2
movddup 112(%rdi), %xmm7
# qhasm: r2 = mem64[ input_0 + 176 ] x2
movddup 176(%rdi), %xmm8
# qhasm: r3 = mem64[ input_0 + 240 ] x2
movddup 240(%rdi), %xmm9
# qhasm: r4 = mem64[ input_0 + 304 ] x2
movddup 304(%rdi), %xmm10
# qhasm: r5 = mem64[ input_0 + 368 ] x2
movddup 368(%rdi), %xmm11
# qhasm: r6 = mem64[ input_0 + 432 ] x2
movddup 432(%rdi), %xmm12
# qhasm: r7 = mem64[ input_0 + 496 ] x2
movddup 496(%rdi), %xmm13

# qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: 2x v10 = r4 << 32
vpsllq $32, %xmm10, %xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
vpsrlq $32, %xmm6, %xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: 2x v10 = r5 << 32
vpsllq $32, %xmm11, %xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
vpsrlq $32, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: 2x v10 = r6 << 32
vpsllq $32, %xmm12, %xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
vpsrlq $32, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm9, %xmm12
# qhasm: 2x v10 = r7 << 32
vpsllq $32, %xmm13, %xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
vpsrlq $32, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1, %xmm13, %xmm13
# qhasm: r3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: 4x v10 = r2 << 16
vpslld $16, %xmm11, %xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
vpsrld $16, %xmm14, %xmm14
# qhasm: v11 = r2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: r2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: 4x v10 = r3 << 16
vpslld $16, %xmm12, %xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
vpsrld $16, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: 4x v10 = r6 << 16
vpslld $16, %xmm8, %xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
vpsrld $16, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: 4x v10 = r7 << 16
vpslld $16, %xmm9, %xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: r5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm13, %xmm9
# qhasm: 8x v10 = r1 << 8
vpsllw $8, %xmm14, %xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8, %xmm13, %xmm13
# qhasm: v11 = r1 & mask5
vpand %xmm5, %xmm14, %xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm9, %xmm9
# qhasm: r1 = v01 | v11
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = r2 & mask4
vpand %xmm4, %xmm11, %xmm14
# qhasm: 8x v10 = r3 << 8
vpsllw $8, %xmm10, %xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8, %xmm11, %xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5, %xmm10, %xmm10
# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = r4 & mask4
vpand %xmm4, %xmm12, %xmm11
# qhasm: 8x v10 = r5 << 8
vpsllw $8, %xmm8, %xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8, %xmm12, %xmm12
# qhasm: v11 = r5 & mask5
vpand %xmm5, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: r5 = v01 | v11
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = r6 & mask4
vpand %xmm4, %xmm6, %xmm12
# qhasm: 8x v10 = r7 << 8
vpsllw $8, %xmm7, %xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8, %xmm6, %xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5, %xmm7, %xmm7
# qhasm: r6 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
vpor %xmm7, %xmm6, %xmm6

# qhasm: buf = r0[0]
pextrq $0x0, %xmm9, %rsi
# qhasm: mem64[ input_0 + 48 ] = buf
movq %rsi, 48(%rdi)
# qhasm: buf = r1[0]
pextrq $0x0, %xmm13, %rsi
# qhasm: mem64[ input_0 + 112 ] = buf
movq %rsi, 112(%rdi)
# qhasm: buf = r2[0]
pextrq $0x0, %xmm14, %rsi
# qhasm: mem64[ input_0 + 176 ] = buf
movq %rsi, 176(%rdi)
# qhasm: buf = r3[0]
pextrq $0x0, %xmm10, %rsi
# qhasm: mem64[ input_0 + 240 ] = buf
movq %rsi, 240(%rdi)
# qhasm: buf = r4[0]
pextrq $0x0, %xmm11, %rsi
# qhasm: mem64[ input_0 + 304 ] = buf
movq %rsi, 304(%rdi)
# qhasm: buf = r5[0]
pextrq $0x0, %xmm8, %rsi
# qhasm: mem64[ input_0 + 368 ] = buf
movq %rsi, 368(%rdi)
# qhasm: buf = r6[0]
pextrq $0x0, %xmm12, %rsi
# qhasm: mem64[ input_0 + 432 ] = buf
movq %rsi, 432(%rdi)
# qhasm: buf = r7[0]
pextrq $0x0, %xmm6, %rsi
# qhasm: mem64[ input_0 + 496 ] = buf
movq %rsi, 496(%rdi)
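
# Last slice of this pass: offsets 56, 120, ..., 504. From here on the
# register allocator also reuses the mask registers xmm0-xmm4, which is
# why all six masks are reloaded before the second pass below.
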
# qhasm: r0 = mem64[ input_0 + 56 ] x2
movddup 56(%rdi), %xmm6
# qhasm: r1 = mem64[ input_0 + 120 ] x2
movddup 120(%rdi), %xmm7
# qhasm: r2 = mem64[ input_0 + 184 ] x2
movddup 184(%rdi), %xmm8
# qhasm: r3 = mem64[ input_0 + 248 ] x2
movddup 248(%rdi), %xmm9
# qhasm: r4 = mem64[ input_0 + 312 ] x2
movddup 312(%rdi), %xmm10
# qhasm: r5 = mem64[ input_0 + 376 ] x2
movddup 376(%rdi), %xmm11
# qhasm: r6 = mem64[ input_0 + 440 ] x2
movddup 440(%rdi), %xmm12
# qhasm: r7 = mem64[ input_0 + 504 ] x2
movddup 504(%rdi), %xmm13

# qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: 2x v10 = r4 << 32
vpsllq $32, %xmm10, %xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
vpsrlq $32, %xmm6, %xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: 2x v10 = r5 << 32
vpsllq $32, %xmm11, %xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
vpsrlq $32, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: 2x v10 = r6 << 32
vpsllq $32, %xmm12, %xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
vpsrlq $32, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm9, %xmm0
# qhasm: 2x v10 = r7 << 32
vpsllq $32, %xmm13, %xmm12
# qhasm: 2x v01 = r3 unsigned>> 32
vpsrlq $32, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1, %xmm13, %xmm1
# qhasm: r3 = v00 | v10
vpor %xmm12, %xmm0, %xmm0
# qhasm: r7 = v01 | v11
vpor %xmm1, %xmm9, %xmm1

# qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm14, %xmm9
# qhasm: 4x v10 = r2 << 16
vpslld $16, %xmm11, %xmm12
# qhasm: 4x v01 = r0 unsigned>> 16
vpsrld $16, %xmm14, %xmm13
# qhasm: v11 = r2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: r0 = v00 | v10
vpor %xmm12, %xmm9, %xmm9
# qhasm: r2 = v01 | v11
vpor %xmm11, %xmm13, %xmm11
# qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm10, %xmm12
# qhasm: 4x v10 = r3 << 16
vpslld $16, %xmm0, %xmm13
# qhasm: 4x v01 = r1 unsigned>> 16
vpsrld $16, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3, %xmm0, %xmm0
# qhasm: r1 = v00 | v10
vpor %xmm13, %xmm12, %xmm12
# qhasm: r3 = v01 | v11
vpor %xmm0, %xmm10, %xmm0
# qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm6, %xmm10
# qhasm: 4x v10 = r6 << 16
vpslld $16, %xmm8, %xmm13
# qhasm: 4x v01 = r4 unsigned>> 16
vpsrld $16, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
vpor %xmm13, %xmm10, %xmm10
# qhasm: r6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm7, %xmm2
# qhasm: 4x v10 = r7 << 16
vpslld $16, %xmm1, %xmm8
# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3, %xmm1, %xmm1
# qhasm: r5 = v00 | v10
vpor %xmm8, %xmm2, %xmm2
# qhasm: r7 = v01 | v11
vpor %xmm1, %xmm7, %xmm1

# qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm9, %xmm3
# qhasm: 8x v10 = r1 << 8
vpsllw $8, %xmm12, %xmm7
# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8, %xmm9, %xmm8
# qhasm: v11 = r1 & mask5
vpand %xmm5, %xmm12, %xmm9
# qhasm: r0 = v00 | v10
vpor %xmm7, %xmm3, %xmm3
# qhasm: r1 = v01 | v11
vpor %xmm9, %xmm8, %xmm7
# qhasm: v00 = r2 & mask4
vpand %xmm4, %xmm11, %xmm8
# qhasm: 8x v10 = r3 << 8
vpsllw $8, %xmm0, %xmm9
# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8, %xmm11, %xmm11
# qhasm: v11 = r3 & mask5
vpand %xmm5, %xmm0, %xmm0
# qhasm: r2 = v00 | v10
vpor %xmm9, %xmm8, %xmm8
# qhasm: r3 = v01 | v11
vpor %xmm0, %xmm11, %xmm0
# qhasm: v00 = r4 & mask4
vpand %xmm4, %xmm10, %xmm9
# qhasm: 8x v10 = r5 << 8
vpsllw $8, %xmm2, %xmm11
# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8, %xmm10, %xmm10
# qhasm: v11 = r5 & mask5
vpand %xmm5, %xmm2, %xmm2
# qhasm: r4 = v00 | v10
vpor %xmm11, %xmm9, %xmm9
# qhasm: r5 = v01 | v11
vpor %xmm2, %xmm10, %xmm2
# qhasm: v00 = r6 & mask4
vpand %xmm4, %xmm6, %xmm4
# qhasm: 8x v10 = r7 << 8
vpsllw $8, %xmm1, %xmm10
# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8, %xmm6, %xmm6
# qhasm: v11 = r7 & mask5
vpand %xmm5, %xmm1, %xmm1
# qhasm: r6 = v00 | v10
vpor %xmm10, %xmm4, %xmm4
# qhasm: r7 = v01 | v11
vpor %xmm1, %xmm6, %xmm1

# qhasm: buf = r0[0]
pextrq $0x0, %xmm3, %rsi
# qhasm: mem64[ input_0 + 56 ] = buf
movq %rsi, 56(%rdi)
# qhasm: buf = r1[0]
pextrq $0x0, %xmm7, %rsi
# qhasm: mem64[ input_0 + 120 ] = buf
movq %rsi, 120(%rdi)
# qhasm: buf = r2[0]
pextrq $0x0, %xmm8, %rsi
# qhasm: mem64[ input_0 + 184 ] = buf
movq %rsi, 184(%rdi)
# qhasm: buf = r3[0]
pextrq $0x0, %xmm0, %rsi
# qhasm: mem64[ input_0 + 248 ] = buf
movq %rsi, 248(%rdi)
# qhasm: buf = r4[0]
pextrq $0x0, %xmm9, %rsi
# qhasm: mem64[ input_0 + 312 ] = buf
movq %rsi, 312(%rdi)
# qhasm: buf = r5[0]
pextrq $0x0, %xmm2, %rsi
# qhasm: mem64[ input_0 + 376 ] = buf
movq %rsi, 376(%rdi)
# qhasm: buf = r6[0]
pextrq $0x0, %xmm4, %rsi
# qhasm: mem64[ input_0 + 440 ] = buf
movq %rsi, 440(%rdi)
# qhasm: buf = r7[0]
pextrq $0x0, %xmm1, %rsi
# qhasm: mem64[ input_0 + 504 ] = buf
movq %rsi, 504(%rdi)
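
# Second pass: the six masks are reloaded with the intra-byte patterns
# (MASK2_* selects alternating 4-bit groups, MASK1_* 2-bit groups,
# MASK0_* single bits) and the eight quadwords at offsets 0, 8, ..., 56
# are interleaved again with shift distances 4, 2 and 1.
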
# qhasm: mask0 aligned= mem128[ MASK2_0 ]
movdqa MASK2_0(%rip), %xmm0
# qhasm: mask1 aligned= mem128[ MASK2_1 ]
movdqa MASK2_1(%rip), %xmm1
# qhasm: mask2 aligned= mem128[ MASK1_0 ]
movdqa MASK1_0(%rip), %xmm2
# qhasm: mask3 aligned= mem128[ MASK1_1 ]
movdqa MASK1_1(%rip), %xmm3
# qhasm: mask4 aligned= mem128[ MASK0_0 ]
movdqa MASK0_0(%rip), %xmm4
# qhasm: mask5 aligned= mem128[ MASK0_1 ]
movdqa MASK0_1(%rip), %xmm5

# qhasm: r0 = mem64[ input_0 + 0 ] x2
movddup 0(%rdi), %xmm6
# qhasm: r1 = mem64[ input_0 + 8 ] x2
movddup 8(%rdi), %xmm7
# qhasm: r2 = mem64[ input_0 + 16 ] x2
movddup 16(%rdi), %xmm8
# qhasm: r3 = mem64[ input_0 + 24 ] x2
movddup 24(%rdi), %xmm9
# qhasm: r4 = mem64[ input_0 + 32 ] x2
movddup 32(%rdi), %xmm10
# qhasm: r5 = mem64[ input_0 + 40 ] x2
movddup 40(%rdi), %xmm11
# qhasm: r6 = mem64[ input_0 + 48 ] x2
movddup 48(%rdi), %xmm12
# qhasm: r7 = mem64[ input_0 + 56 ] x2
movddup 56(%rdi), %xmm13

# qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = r4 & mask0
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = r0 & mask1
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = r4 & mask1
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm6
# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: r4 = v01 | v11
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = r5 & mask0
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = r1 & mask1
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm7
# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = r6 & mask0
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = r2 & mask1
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm8
# qhasm: r2 = v00 | v10
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = r7 & mask0
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
psllq $4, %xmm15
# qhasm: v01 = r3 & mask1
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4, %xmm9
# qhasm: r3 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
vpor %xmm13, %xmm9, %xmm9
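
# 4-bit interleave of all four register pairs (r0/r4, r1/r5, r2/r6, r3/r7)
# is complete; the 2-bit stage below pairs r0/r2, r1/r3, r4/r6 and r5/r7.
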
# qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = r2 & mask2
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = r0 & mask3
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = r2 & mask3
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm14
# qhasm: r0 = v00 | v10
vpor %xmm15, %xmm13, %xmm13
# qhasm: r2 = v01 | v11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = r3 & mask2
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = r1 & mask3
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm10
# qhasm: r1 = v00 | v10
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = r6 & mask2
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = r4 & mask3
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm6
# qhasm: r4 = v00 | v10
vpor %xmm15, %xmm12, %xmm12
# qhasm: r6 = v01 | v11
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = r7 & mask2
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
psllq $2, %xmm15
# qhasm: v01 = r5 & mask3
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2, %xmm7
# qhasm: r5 = v00 | v10
vpor %xmm15, %xmm8, %xmm8
# qhasm: r7 = v01 | v11
vpor %xmm9, %xmm7, %xmm7
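
# Final 1-bit stage: adjacent rows are paired (r0/r1, r2/r3, r4/r5, ...)
# and single bits are exchanged via shifts by 1.
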
$2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # 
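#
# Below, the tail of the 1-bit round (mask4/mask5) completes the
# exchange network for the first eight 64-bit rows: complementary
# masked fields are swapped between rows 4 apart (shift 4,
# mask0/mask1), then rows 2 apart (shift 2, mask2/mask3), then
# adjacent rows (shift 1, mask4/mask5). vpunpcklqdq then packs the
# low quadwords pairwise and the results are stored at input_0 + 0,
# 16, 32 and 48, before the next eight rows are loaded from
# input_0 + 64 .. 120 and pushed through the same network.
#
# A minimal C sketch of one masked exchange (the r0/r4 step of the
# 4-bit round), assuming mask0 and mask1 hold complementary 4-bit
# field masks (e.g. 0x0F0F... and 0xF0F0...):
#
#   uint64_t v00 = r0 & mask0;          /* vpand         */
#   uint64_t v10 = (r4 & mask0) << 4;   /* vpand, psllq  */
#   uint64_t v01 = (r0 & mask1) >> 4;   /* vpand, psrlq  */
#   uint64_t v11 = r4 & mask1;          /* vpand         */
#   r0 = v00 | v10;                     /* vpor          */
#   r4 = v01 | v11;                     /* vpor          */
#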
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7

# qhasm: mem128[ input_0 + 0 ] = t0
# asm 1: movdqu <t0=reg128#8,0(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,0(<input_0=%rdi)
movdqu %xmm7,0(%rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7

# qhasm: mem128[ input_0 + 16 ] = t0
# asm 1: movdqu <t0=reg128#8,16(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,16(<input_0=%rdi)
movdqu %xmm7,16(%rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7

# qhasm: mem128[ input_0 + 32 ] = t0
# asm 1: movdqu <t0=reg128#8,32(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,32(<input_0=%rdi)
movdqu %xmm7,32(%rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6

# qhasm: mem128[ input_0 + 48 ] = t0
# asm 1: movdqu <t0=reg128#7,48(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,48(<input_0=%rdi)
movdqu %xmm6,48(%rdi)

# qhasm: r0 = mem64[ input_0 + 64 ] x2
# asm 1: movddup 64(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 64(<input_0=%rdi),>r0=%xmm6
movddup 64(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 72 ] x2
# asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7
movddup 72(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 80 ] x2
# asm 1: movddup 80(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 80(<input_0=%rdi),>r2=%xmm8
movddup 80(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 88 ] x2
# asm 1: movddup 88(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 88(<input_0=%rdi),>r3=%xmm9
movddup 88( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 96 ] x2 # asm 1: movddup 96(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 96(<input_0=%rdi),>r4=%xmm10 movddup 96( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 104 ] x2 # asm 1: movddup 104(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 104(<input_0=%rdi),>r5=%xmm11 movddup 104( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 112 ] x2 # asm 1: movddup 112(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 112(<input_0=%rdi),>r6=%xmm12 movddup 112( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 120 ] x2 # asm 1: movddup 120(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 120(<input_0=%rdi),>r7=%xmm13 movddup 120( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: 
vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 
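#
# Same pattern for the second group of eight rows: the 2-bit round
# (mask2/mask3) continues below, followed by the 1-bit round
# (mask4/mask5) and the packed stores to input_0 + 64 .. 112; the
# groups at input_0 + 128, 192 and 256 repeat it again.
#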
psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq 
$1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 
1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 64 ] = t0 # asm 1: movdqu <t0=reg128#8,64(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,64(<input_0=%rdi) movdqu % xmm7, 64( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 80 ] = t0 # asm 1: movdqu <t0=reg128#8,80(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,80(<input_0=%rdi) movdqu % xmm7, 80( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 96 ] = t0 # asm 1: movdqu <t0=reg128#8,96(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,96(<input_0=%rdi) movdqu % xmm7, 96( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 112 ] = t0 # asm 1: movdqu <t0=reg128#7,112(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,112(<input_0=%rdi) movdqu % xmm6, 112( % rdi) # qhasm: r0 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 128(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 128(<input_0=%rdi),>r0=%xmm6 movddup 128( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 136(<input_0=%rdi),>r1=%xmm7 movddup 136( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 152(<input_0=%rdi),>r3=%xmm9 movddup 152( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 160(<input_0=%rdi),>r4=%xmm10 movddup 160( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 168 ] x2 # asm 1: movddup 168(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 168(<input_0=%rdi),>r5=%xmm11 movddup 168( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 176 ] x2 # asm 1: movddup 176(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 176(<input_0=%rdi),>r6=%xmm12 movddup 176( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 184 ] x2 # asm 1: movddup 184(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 184(<input_0=%rdi),>r7=%xmm13 movddup 184( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & 
mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor 
<v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: 
vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor 
<v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 128 ] = t0 # asm 1: movdqu <t0=reg128#8,128(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,128(<input_0=%rdi) movdqu % xmm7, 128( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 144 ] = t0 # asm 1: movdqu <t0=reg128#8,144(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,144(<input_0=%rdi) movdqu % xmm7, 144( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 160 ] = t0 # asm 1: movdqu <t0=reg128#8,160(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,160(<input_0=%rdi) movdqu % xmm7, 160( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 176 ] = t0 # asm 1: movdqu <t0=reg128#7,176(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,176(<input_0=%rdi) movdqu % xmm6, 176( % rdi) # qhasm: r0 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 192(<input_0=%rdi),>r0=%xmm6 movddup 192( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 200(<input_0=%rdi),>r1=%xmm7 movddup 200( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 208(<input_0=%rdi),>r2=%xmm8 movddup 208( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 224 ] x2 # asm 1: movddup 224(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 224(<input_0=%rdi),>r4=%xmm10 movddup 224( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 232 ] x2 # asm 1: movddup 232(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 232(<input_0=%rdi),>r5=%xmm11 movddup 232( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 240 ] x2 # asm 1: movddup 240(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 240(<input_0=%rdi),>r6=%xmm12 movddup 240( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 248 ] x2 # asm 1: movddup 248(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 248(<input_0=%rdi),>r7=%xmm13 movddup 248( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % 
xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand 
<mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & 
mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 192 ] = t0 # asm 1: movdqu <t0=reg128#8,192(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,192(<input_0=%rdi) movdqu % xmm7, 192( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 208 ] = t0 # asm 1: movdqu 
<t0=reg128#8,208(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,208(<input_0=%rdi) movdqu % xmm7, 208( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 224 ] = t0 # asm 1: movdqu <t0=reg128#8,224(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,224(<input_0=%rdi) movdqu % xmm7, 224( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 240 ] = t0 # asm 1: movdqu <t0=reg128#7,240(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,240(<input_0=%rdi) movdqu % xmm6, 240( % rdi) # qhasm: r0 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 256(<input_0=%rdi),>r0=%xmm6 movddup 256( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 264(<input_0=%rdi),>r1=%xmm7 movddup 264( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 272(<input_0=%rdi),>r2=%xmm8 movddup 272( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 280(<input_0=%rdi),>r3=%xmm9 movddup 280( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 288 ] x2 # asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10 movddup 288( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 296 ] x2 # asm 1: movddup 296(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 296(<input_0=%rdi),>r5=%xmm11 movddup 296( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 304 ] x2 # asm 1: movddup 304(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 304(<input_0=%rdi),>r6=%xmm12 movddup 304( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 312 ] x2 # asm 1: movddup 312(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 312(<input_0=%rdi),>r7=%xmm13 movddup 312( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 
v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand 
<mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand 
<mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 256 ] = t0 # asm 1: movdqu <t0=reg128#8,256(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,256(<input_0=%rdi) movdqu % xmm7, 256( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 272 ] = t0 # asm 1: movdqu <t0=reg128#8,272(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,272(<input_0=%rdi) movdqu % xmm7, 272( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 288 ] = t0 # asm 1: movdqu <t0=reg128#8,288(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,288(<input_0=%rdi) movdqu % xmm7, 288( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 304 ] = t0 
# asm 1: movdqu <t0=reg128#7,304(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,304(<input_0=%rdi) movdqu % xmm6, 304( % rdi) # qhasm: r0 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 320(<input_0=%rdi),>r0=%xmm6 movddup 320( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 328(<input_0=%rdi),>r1=%xmm7 movddup 328( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 336(<input_0=%rdi),>r2=%xmm8 movddup 336( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 344(<input_0=%rdi),>r3=%xmm9 movddup 344( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 352 ] x2 # asm 1: movddup 352(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 352(<input_0=%rdi),>r4=%xmm10 movddup 352( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 360 ] x2 # asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11 movddup 360( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 368 ] x2 # asm 1: movddup 368(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 368(<input_0=%rdi),>r6=%xmm12 movddup 368( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 376 ] x2 # asm 1: movddup 376(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 376(<input_0=%rdi),>r7=%xmm13 movddup 376( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 
psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # 
asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 320 ] = t0 # asm 1: movdqu <t0=reg128#8,320(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,320(<input_0=%rdi) movdqu % xmm7, 320( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 336 ] = t0 # asm 1: movdqu <t0=reg128#8,336(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,336(<input_0=%rdi) movdqu % xmm7, 336( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 352 ] = t0 # asm 1: movdqu <t0=reg128#8,352(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,352(<input_0=%rdi) movdqu % xmm7, 352( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 368 ] = t0 # asm 1: movdqu <t0=reg128#7,368(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,368(<input_0=%rdi) movdqu % xmm6, 368( % rdi) # qhasm: r0 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 384(<input_0=%rdi),>r0=%xmm6 movddup 384( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 392(<input_0=%rdi),>r1=%xmm7 movddup 392( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 400(<input_0=%rdi),>r2=%xmm8 movddup 400( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 
408(<input_0=%rdi),>r3=%xmm9 movddup 408( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 416 ] x2 # asm 1: movddup 416(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 416(<input_0=%rdi),>r4=%xmm10 movddup 416( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 424 ] x2 # asm 1: movddup 424(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 424(<input_0=%rdi),>r5=%xmm11 movddup 424( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 432 ] x2 # asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12 movddup 432( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 440 ] x2 # asm 1: movddup 440(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 440(<input_0=%rdi),>r7=%xmm13 movddup 440( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # 
qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # 
asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq 
$1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15 vpand % xmm4, % 
xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8 # asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7 vpunpcklqdq % xmm13, % xmm9, % xmm7 # qhasm: mem128[ input_0 + 384 ] = t0 # asm 1: movdqu <t0=reg128#8,384(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,384(<input_0=%rdi) movdqu % xmm7, 384( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8 # asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7 vpunpcklqdq % xmm10, % xmm14, % xmm7 # qhasm: mem128[ input_0 + 400 ] = t0 # asm 1: movdqu <t0=reg128#8,400(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,400(<input_0=%rdi) movdqu % xmm7, 400( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8 # asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7 vpunpcklqdq % xmm8, % xmm11, % xmm7 # qhasm: mem128[ input_0 + 416 ] = t0 # asm 1: movdqu <t0=reg128#8,416(<input_0=int64#1) # asm 2: movdqu <t0=%xmm7,416(<input_0=%rdi) movdqu % xmm7, 416( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7 # asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6 vpunpcklqdq % xmm6, % xmm12, % xmm6 # qhasm: mem128[ input_0 + 432 ] = t0 # asm 1: movdqu <t0=reg128#7,432(<input_0=int64#1) # asm 2: movdqu <t0=%xmm6,432(<input_0=%rdi) movdqu % xmm6, 432( % rdi) # qhasm: r0 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 448(<input_0=%rdi),>r0=%xmm6 movddup 448( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 456(<input_0=%rdi),>r1=%xmm7 movddup 456( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 464(<input_0=%rdi),>r2=%xmm8 movddup 464( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 472(<input_0=%rdi),>r3=%xmm9 movddup 472( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 480 ] x2 # asm 1: movddup 480(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 480(<input_0=%rdi),>r4=%xmm10 movddup 480( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 488 ] x2 # asm 1: movddup 488(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 488(<input_0=%rdi),>r5=%xmm11 movddup 488( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 496 ] x2 # asm 1: movddup 496(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 496(<input_0=%rdi),>r6=%xmm12 movddup 496( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 504 ] x2 # asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13 movddup 504( % rdi), % xmm13 # 
qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % 
xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#1 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm0 vpand % xmm0, % xmm13, % xmm0 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#1 # asm 2: psllq $4,<v10=%xmm0 psllq $4, % xmm0 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1 vpand % xmm1, % xmm13, % xmm1 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#1,<v00=reg128#13,>r3=reg128#1 # asm 2: vpor <v10=%xmm0,<v00=%xmm12,>r3=%xmm0 vpor % xmm0, % xmm12, % xmm0 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1 vpor % xmm1, % xmm9, % xmm1 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9 vpand % xmm2, % xmm14, % xmm9 # qhasm: v10 = r2 & mask2 # asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#13 # asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm12 vpand % xmm2, % xmm11, % xmm12 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#13 # asm 2: psllq $2,<v10=%xmm12 psllq $2, % xmm12 # qhasm: v01 = r0 & mask3 # asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#14 # asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm13 vpand % xmm3, % xmm14, % xmm13 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#14 # asm 2: psrlq $2,<v01=%xmm13 psrlq $2, % xmm13 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9 vpor % xmm12, % xmm9, % xmm9 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11 vpor % xmm11, % xmm13, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12 vpand % xmm2, % xmm10, % xmm12 # qhasm: v10 = r3 & mask2 # asm 1: vpand <mask2=reg128#3,<r3=reg128#1,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r3=%xmm0,>v10=%xmm13 vpand % xmm2, % xmm0, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r1 & mask3 # asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0 vpand % xmm3, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13 # asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12 vpor % xmm13, % xmm12, % xmm12 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1 # asm 2: vpor 
<v11=%xmm0,<v01=%xmm10,>r3=%xmm0 vpor % xmm0, % xmm10, % xmm0 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10 vpand % xmm2, % xmm6, % xmm10 # qhasm: v10 = r6 & mask2 # asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#14 # asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm13 vpand % xmm2, % xmm8, % xmm13 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#14 # asm 2: psllq $2,<v10=%xmm13 psllq $2, % xmm13 # qhasm: v01 = r4 & mask3 # asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11 # asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10 vpor % xmm13, % xmm10, % xmm10 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = r7 & mask2 # asm 1: vpand <mask2=reg128#3,<r7=reg128#2,>v10=reg128#3 # asm 2: vpand <mask2=%xmm2,<r7=%xmm1,>v10=%xmm2 vpand % xmm2, % xmm1, % xmm2 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#3 # asm 2: psllq $2,<v10=%xmm2 psllq $2, % xmm2 # qhasm: v01 = r5 & mask3 # asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1 vpand % xmm3, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>r5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>r5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = r1 & mask4 # asm 1: vpand <mask4=reg128#5,<r1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<r1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = r0 & mask5 # asm 1: vpand <mask5=reg128#6,<r0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<r0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7 vpor % 
xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<r3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<r5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<r4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<r7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>r6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>r6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % 
xmm1 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#8,<r0=reg128#4,>t0=reg128#4 # asm 2: vpunpcklqdq <r1=%xmm7,<r0=%xmm3,>t0=%xmm3 vpunpcklqdq % xmm7, % xmm3, % xmm3 # qhasm: mem128[ input_0 + 448 ] = t0 # asm 1: movdqu <t0=reg128#4,448(<input_0=int64#1) # asm 2: movdqu <t0=%xmm3,448(<input_0=%rdi) movdqu % xmm3, 448( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#1,<r2=reg128#9,>t0=reg128#1 # asm 2: vpunpcklqdq <r3=%xmm0,<r2=%xmm8,>t0=%xmm0 vpunpcklqdq % xmm0, % xmm8, % xmm0 # qhasm: mem128[ input_0 + 464 ] = t0 # asm 1: movdqu <t0=reg128#1,464(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,464(<input_0=%rdi) movdqu % xmm0, 464( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#3,<r4=reg128#10,>t0=reg128#1 # asm 2: vpunpcklqdq <r5=%xmm2,<r4=%xmm9,>t0=%xmm0 vpunpcklqdq % xmm2, % xmm9, % xmm0 # qhasm: mem128[ input_0 + 480 ] = t0 # asm 1: movdqu <t0=reg128#1,480(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,480(<input_0=%rdi) movdqu % xmm0, 480( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#2,<r6=reg128#5,>t0=reg128#1 # asm 2: vpunpcklqdq <r7=%xmm1,<r6=%xmm4,>t0=%xmm0 vpunpcklqdq % xmm1, % xmm4, % xmm0 # qhasm: mem128[ input_0 + 496 ] = t0 # asm 1: movdqu <t0=reg128#1,496(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,496(<input_0=%rdi) movdqu % xmm0, 496( % rdi) # qhasm: return add % r11, % rsp ret
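Every interleaving step in the routine above follows one butterfly pattern: keep the lanes selected by the low mask, shift the partner's low lanes up by the stride, shift its own high lanes down, and OR the halves back together (psllq/psrlq act per 64-bit lane, so a word-level model is faithful). A minimal C sketch of that step, assuming plain 64-bit words; the name butterfly and its parameters are illustrative, not part of the PQClean sources:

#include <stdint.h>

/* One butterfly of the bit-interleave: mask_lo/mask_hi are a
   complementary MASKk_0/MASKk_1 pair from consts.S and s is the run
   length (the asm above uses s = 2 with mask2/mask3 and s = 1 with
   mask4/mask5). */
static void butterfly(uint64_t *a, uint64_t *b,
                      uint64_t mask_lo, uint64_t mask_hi, unsigned s)
{
    uint64_t v00 = *a & mask_lo;         /* stays in place            */
    uint64_t v10 = (*b & mask_lo) << s;  /* low part of b moves up    */
    uint64_t v01 = (*a & mask_hi) >> s;  /* high part of a moves down */
    uint64_t v11 = *b & mask_hi;         /* stays in place            */
    *a = v00 | v10;
    *b = v01 | v11;
}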
mktmansour/MKT-KSA-Geolocation-Security
2,712
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864f/avx2/consts.S
#include "namespace.h" #if defined(__APPLE__) #define ASM_HIDDEN .private_extern #else #define ASM_HIDDEN .hidden #endif #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) .data ASM_HIDDEN MASK0_0 ASM_HIDDEN MASK0_1 ASM_HIDDEN MASK1_0 ASM_HIDDEN MASK1_1 ASM_HIDDEN MASK2_0 ASM_HIDDEN MASK2_1 ASM_HIDDEN MASK3_0 ASM_HIDDEN MASK3_1 ASM_HIDDEN MASK4_0 ASM_HIDDEN MASK4_1 ASM_HIDDEN MASK5_0 ASM_HIDDEN MASK5_1 .globl MASK0_0 .globl MASK0_1 .globl MASK1_0 .globl MASK1_1 .globl MASK2_0 .globl MASK2_1 .globl MASK3_0 .globl MASK3_1 .globl MASK4_0 .globl MASK4_1 .globl MASK5_0 .globl MASK5_1 .p2align 5 MASK0_0: .quad 0x5555555555555555, 0x5555555555555555, 0x5555555555555555, 0x5555555555555555 MASK0_1: .quad 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA MASK1_0: .quad 0x3333333333333333, 0x3333333333333333, 0x3333333333333333, 0x3333333333333333 MASK1_1: .quad 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC MASK2_0: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F MASK2_1: .quad 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0 MASK3_0: .quad 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF MASK3_1: .quad 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00 MASK4_0: .quad 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF MASK4_1: .quad 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000 MASK5_0: .quad 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF MASK5_1: .quad 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000
mktmansour/MKT-KSA-Geolocation-Security
8,801
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864f/avx2/update_asm.S
#include "namespace.h" #define update_asm CRYPTO_NAMESPACE(update_asm) #define _update_asm _CRYPTO_NAMESPACE(update_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 s0 # qhasm: int64 s1 # qhasm: enter update_asm .p2align 5 .global _update_asm .global update_asm _update_asm: update_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: s1 = input_1 # asm 1: mov <input_1=int64#2,>s1=int64#2 # asm 2: mov <input_1=%rsi,>s1=%rsi mov % rsi, % rsi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd 
$1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add 
<input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#2,<s0=int64#4 # asm 2: shrd $1,<s1=%rsi,<s0=%rcx shrd $1, % rsi, % rcx # qhasm: (uint64) s1 >>= 1 # asm 1: shr $1,<s1=int64#2 # asm 2: shr $1,<s1=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: return add % r11, % rsp ret
mktmansour/MKT-KSA-Geolocation-Security
40,155
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864f/avx2/vec128_mul_asm.S
#include "namespace.h" #define vec128_mul_asm CRYPTO_NAMESPACE(vec128_mul_asm) #define _vec128_mul_asm _CRYPTO_NAMESPACE(vec128_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 b6 # qhasm: reg256 b7 # qhasm: reg256 b8 # qhasm: reg256 b9 # qhasm: reg256 b10 # qhasm: reg256 b11 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r # qhasm: reg128 h0 # qhasm: reg128 h1 # qhasm: reg128 h2 # qhasm: reg128 h3 # qhasm: reg128 h4 # qhasm: reg128 h5 # qhasm: reg128 h6 # qhasm: reg128 h7 # qhasm: reg128 h8 # qhasm: reg128 h9 # qhasm: reg128 h10 # qhasm: reg128 h11 # qhasm: reg128 h12 # qhasm: reg128 h13 # qhasm: reg128 h14 # qhasm: reg128 h15 # qhasm: reg128 h16 # qhasm: reg128 h17 # qhasm: reg128 h18 # qhasm: reg128 h19 # qhasm: reg128 h20 # qhasm: reg128 h21 # qhasm: reg128 h22 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: enter vec128_mul_asm .p2align 5 .global _vec128_mul_asm .global vec128_mul_asm _vec128_mul_asm: vec128_mul_asm: mov % rsp, % r11 and $31, % r11 add $608, % r11 sub % r11, % rsp # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>ptr=%rcx leaq 0( % rsp), % rcx # qhasm: b11 = mem128[ input_2 + 176 ] x2 # asm 1: vbroadcasti128 176(<input_2=int64#3), >b11=reg256#1 # asm 2: vbroadcasti128 176(<input_2=%rdx), >b11=%ymm0 vbroadcasti128 176( % rdx), % ymm0 # qhasm: a5[0] = mem128[ input_1 + 80 ] # asm 1: vinsertf128 $0x0,80(<input_1=int64#2),<a5=reg256#2,<a5=reg256#2 # asm 2: vinsertf128 $0x0,80(<input_1=%rsi),<a5=%ymm1,<a5=%ymm1 vinsertf128 $0x0, 80( % rsi), % ymm1, % ymm1 # qhasm: a5[1] = mem128[ input_1 + 176 ] # asm 1: vinsertf128 $0x1,176(<input_1=int64#2),<a5=reg256#2,<a5=reg256#2 # asm 2: vinsertf128 $0x1,176(<input_1=%rsi),<a5=%ymm1,<a5=%ymm1 vinsertf128 $0x1, 176( % rsi), % ymm1, % ymm1 # qhasm: r16 = b11 & a5 # asm 1: vpand <b11=reg256#1,<a5=reg256#2,>r16=reg256#3 # asm 2: vpand <b11=%ymm0,<a5=%ymm1,>r16=%ymm2 vpand % ymm0, % ymm1, % ymm2 # qhasm: mem256[ ptr + 512 ] = r16 # asm 1: vmovupd <r16=reg256#3,512(<ptr=int64#4) # asm 2: vmovupd <r16=%ymm2,512(<ptr=%rcx) vmovupd % ymm2, 512( % rcx) # qhasm: a4[0] = mem128[ input_1 + 64 ] # asm 1: vinsertf128 $0x0,64(<input_1=int64#2),<a4=reg256#3,<a4=reg256#3 # asm 2: vinsertf128 $0x0,64(<input_1=%rsi),<a4=%ymm2,<a4=%ymm2 vinsertf128 $0x0, 64( % rsi), % ymm2, % ymm2 # qhasm: a4[1] = mem128[ input_1 + 160 ] # asm 1: vinsertf128 $0x1,160(<input_1=int64#2),<a4=reg256#3,<a4=reg256#3 # asm 2: vinsertf128 $0x1,160(<input_1=%rsi),<a4=%ymm2,<a4=%ymm2 vinsertf128 $0x1, 160( % rsi), % ymm2, % ymm2 # qhasm: r15 = 
b11 & a4 # asm 1: vpand <b11=reg256#1,<a4=reg256#3,>r15=reg256#4 # asm 2: vpand <b11=%ymm0,<a4=%ymm2,>r15=%ymm3 vpand % ymm0, % ymm2, % ymm3 # qhasm: a3[0] = mem128[ input_1 + 48 ] # asm 1: vinsertf128 $0x0,48(<input_1=int64#2),<a3=reg256#5,<a3=reg256#5 # asm 2: vinsertf128 $0x0,48(<input_1=%rsi),<a3=%ymm4,<a3=%ymm4 vinsertf128 $0x0, 48( % rsi), % ymm4, % ymm4 # qhasm: a3[1] = mem128[ input_1 + 144 ] # asm 1: vinsertf128 $0x1,144(<input_1=int64#2),<a3=reg256#5,<a3=reg256#5 # asm 2: vinsertf128 $0x1,144(<input_1=%rsi),<a3=%ymm4,<a3=%ymm4 vinsertf128 $0x1, 144( % rsi), % ymm4, % ymm4 # qhasm: r14 = b11 & a3 # asm 1: vpand <b11=reg256#1,<a3=reg256#5,>r14=reg256#6 # asm 2: vpand <b11=%ymm0,<a3=%ymm4,>r14=%ymm5 vpand % ymm0, % ymm4, % ymm5 # qhasm: a2[0] = mem128[ input_1 + 32 ] # asm 1: vinsertf128 $0x0,32(<input_1=int64#2),<a2=reg256#7,<a2=reg256#7 # asm 2: vinsertf128 $0x0,32(<input_1=%rsi),<a2=%ymm6,<a2=%ymm6 vinsertf128 $0x0, 32( % rsi), % ymm6, % ymm6 # qhasm: a2[1] = mem128[ input_1 + 128 ] # asm 1: vinsertf128 $0x1,128(<input_1=int64#2),<a2=reg256#7,<a2=reg256#7 # asm 2: vinsertf128 $0x1,128(<input_1=%rsi),<a2=%ymm6,<a2=%ymm6 vinsertf128 $0x1, 128( % rsi), % ymm6, % ymm6 # qhasm: r13 = b11 & a2 # asm 1: vpand <b11=reg256#1,<a2=reg256#7,>r13=reg256#8 # asm 2: vpand <b11=%ymm0,<a2=%ymm6,>r13=%ymm7 vpand % ymm0, % ymm6, % ymm7 # qhasm: a1[0] = mem128[ input_1 + 16 ] # asm 1: vinsertf128 $0x0,16(<input_1=int64#2),<a1=reg256#9,<a1=reg256#9 # asm 2: vinsertf128 $0x0,16(<input_1=%rsi),<a1=%ymm8,<a1=%ymm8 vinsertf128 $0x0, 16( % rsi), % ymm8, % ymm8 # qhasm: a1[1] = mem128[ input_1 + 112 ] # asm 1: vinsertf128 $0x1,112(<input_1=int64#2),<a1=reg256#9,<a1=reg256#9 # asm 2: vinsertf128 $0x1,112(<input_1=%rsi),<a1=%ymm8,<a1=%ymm8 vinsertf128 $0x1, 112( % rsi), % ymm8, % ymm8 # qhasm: r12 = b11 & a1 # asm 1: vpand <b11=reg256#1,<a1=reg256#9,>r12=reg256#10 # asm 2: vpand <b11=%ymm0,<a1=%ymm8,>r12=%ymm9 vpand % ymm0, % ymm8, % ymm9 # qhasm: a0[0] = mem128[ input_1 + 0 ] # asm 1: vinsertf128 $0x0,0(<input_1=int64#2),<a0=reg256#11,<a0=reg256#11 # asm 2: vinsertf128 $0x0,0(<input_1=%rsi),<a0=%ymm10,<a0=%ymm10 vinsertf128 $0x0, 0( % rsi), % ymm10, % ymm10 # qhasm: a0[1] = mem128[ input_1 + 96 ] # asm 1: vinsertf128 $0x1,96(<input_1=int64#2),<a0=reg256#11,<a0=reg256#11 # asm 2: vinsertf128 $0x1,96(<input_1=%rsi),<a0=%ymm10,<a0=%ymm10 vinsertf128 $0x1, 96( % rsi), % ymm10, % ymm10 # qhasm: r11 = b11 & a0 # asm 1: vpand <b11=reg256#1,<a0=reg256#11,>r11=reg256#1 # asm 2: vpand <b11=%ymm0,<a0=%ymm10,>r11=%ymm0 vpand % ymm0, % ymm10, % ymm0 # qhasm: b10 = mem128[ input_2 + 160 ] x2 # asm 1: vbroadcasti128 160(<input_2=int64#3), >b10=reg256#12 # asm 2: vbroadcasti128 160(<input_2=%rdx), >b10=%ymm11 vbroadcasti128 160( % rdx), % ymm11 # qhasm: r = b10 & a5 # asm 1: vpand <b10=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b10=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#13,<r15=reg256#4,<r15=reg256#4 # asm 2: vpxor <r=%ymm12,<r15=%ymm3,<r15=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: mem256[ ptr + 480 ] = r15 # asm 1: vmovupd <r15=reg256#4,480(<ptr=int64#4) # asm 2: vmovupd <r15=%ymm3,480(<ptr=%rcx) vmovupd % ymm3, 480( % rcx) # qhasm: r = b10 & a4 # asm 1: vpand <b10=reg256#12,<a4=reg256#3,>r=reg256#4 # asm 2: vpand <b10=%ymm11,<a4=%ymm2,>r=%ymm3 vpand % ymm11, % ymm2, % ymm3 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#4,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm3,<r14=%ymm5,<r14=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b10 & a3 # asm 1: 
vpand <b10=reg256#12,<a3=reg256#5,>r=reg256#4 # asm 2: vpand <b10=%ymm11,<a3=%ymm4,>r=%ymm3 vpand % ymm11, % ymm4, % ymm3 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#4,<r13=reg256#8,<r13=reg256#8 # asm 2: vpxor <r=%ymm3,<r13=%ymm7,<r13=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b10 & a2 # asm 1: vpand <b10=reg256#12,<a2=reg256#7,>r=reg256#4 # asm 2: vpand <b10=%ymm11,<a2=%ymm6,>r=%ymm3 vpand % ymm11, % ymm6, % ymm3 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#4,<r12=reg256#10,<r12=reg256#10 # asm 2: vpxor <r=%ymm3,<r12=%ymm9,<r12=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b10 & a1 # asm 1: vpand <b10=reg256#12,<a1=reg256#9,>r=reg256#4 # asm 2: vpand <b10=%ymm11,<a1=%ymm8,>r=%ymm3 vpand % ymm11, % ymm8, % ymm3 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#4,<r11=reg256#1,<r11=reg256#1 # asm 2: vpxor <r=%ymm3,<r11=%ymm0,<r11=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r10 = b10 & a0 # asm 1: vpand <b10=reg256#12,<a0=reg256#11,>r10=reg256#4 # asm 2: vpand <b10=%ymm11,<a0=%ymm10,>r10=%ymm3 vpand % ymm11, % ymm10, % ymm3 # qhasm: b9 = mem128[ input_2 + 144 ] x2 # asm 1: vbroadcasti128 144(<input_2=int64#3), >b9=reg256#12 # asm 2: vbroadcasti128 144(<input_2=%rdx), >b9=%ymm11 vbroadcasti128 144( % rdx), % ymm11 # qhasm: r = b9 & a5 # asm 1: vpand <b9=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b9=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#13,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm12,<r14=%ymm5,<r14=%ymm5 vpxor % ymm12, % ymm5, % ymm5 # qhasm: mem256[ ptr + 448 ] = r14 # asm 1: vmovupd <r14=reg256#6,448(<ptr=int64#4) # asm 2: vmovupd <r14=%ymm5,448(<ptr=%rcx) vmovupd % ymm5, 448( % rcx) # qhasm: r = b9 & a4 # asm 1: vpand <b9=reg256#12,<a4=reg256#3,>r=reg256#6 # asm 2: vpand <b9=%ymm11,<a4=%ymm2,>r=%ymm5 vpand % ymm11, % ymm2, % ymm5 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#6,<r13=reg256#8,<r13=reg256#8 # asm 2: vpxor <r=%ymm5,<r13=%ymm7,<r13=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b9 & a3 # asm 1: vpand <b9=reg256#12,<a3=reg256#5,>r=reg256#6 # asm 2: vpand <b9=%ymm11,<a3=%ymm4,>r=%ymm5 vpand % ymm11, % ymm4, % ymm5 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#6,<r12=reg256#10,<r12=reg256#10 # asm 2: vpxor <r=%ymm5,<r12=%ymm9,<r12=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b9 & a2 # asm 1: vpand <b9=reg256#12,<a2=reg256#7,>r=reg256#6 # asm 2: vpand <b9=%ymm11,<a2=%ymm6,>r=%ymm5 vpand % ymm11, % ymm6, % ymm5 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#6,<r11=reg256#1,<r11=reg256#1 # asm 2: vpxor <r=%ymm5,<r11=%ymm0,<r11=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b9 & a1 # asm 1: vpand <b9=reg256#12,<a1=reg256#9,>r=reg256#6 # asm 2: vpand <b9=%ymm11,<a1=%ymm8,>r=%ymm5 vpand % ymm11, % ymm8, % ymm5 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#6,<r10=reg256#4,<r10=reg256#4 # asm 2: vpxor <r=%ymm5,<r10=%ymm3,<r10=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r9 = b9 & a0 # asm 1: vpand <b9=reg256#12,<a0=reg256#11,>r9=reg256#6 # asm 2: vpand <b9=%ymm11,<a0=%ymm10,>r9=%ymm5 vpand % ymm11, % ymm10, % ymm5 # qhasm: b8 = mem128[ input_2 + 128 ] x2 # asm 1: vbroadcasti128 128(<input_2=int64#3), >b8=reg256#12 # asm 2: vbroadcasti128 128(<input_2=%rdx), >b8=%ymm11 vbroadcasti128 128( % rdx), % ymm11 # qhasm: r = b8 & a5 # asm 1: vpand <b8=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b8=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#13,<r13=reg256#8,<r13=reg256#8 # asm 2: vpxor <r=%ymm12,<r13=%ymm7,<r13=%ymm7 vpxor % ymm12, % ymm7, % ymm7 # qhasm: mem256[ 
ptr + 416 ] = r13 # asm 1: vmovupd <r13=reg256#8,416(<ptr=int64#4) # asm 2: vmovupd <r13=%ymm7,416(<ptr=%rcx) vmovupd % ymm7, 416( % rcx) # qhasm: r = b8 & a4 # asm 1: vpand <b8=reg256#12,<a4=reg256#3,>r=reg256#8 # asm 2: vpand <b8=%ymm11,<a4=%ymm2,>r=%ymm7 vpand % ymm11, % ymm2, % ymm7 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#8,<r12=reg256#10,<r12=reg256#10 # asm 2: vpxor <r=%ymm7,<r12=%ymm9,<r12=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b8 & a3 # asm 1: vpand <b8=reg256#12,<a3=reg256#5,>r=reg256#8 # asm 2: vpand <b8=%ymm11,<a3=%ymm4,>r=%ymm7 vpand % ymm11, % ymm4, % ymm7 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#8,<r11=reg256#1,<r11=reg256#1 # asm 2: vpxor <r=%ymm7,<r11=%ymm0,<r11=%ymm0 vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b8 & a2 # asm 1: vpand <b8=reg256#12,<a2=reg256#7,>r=reg256#8 # asm 2: vpand <b8=%ymm11,<a2=%ymm6,>r=%ymm7 vpand % ymm11, % ymm6, % ymm7 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#8,<r10=reg256#4,<r10=reg256#4 # asm 2: vpxor <r=%ymm7,<r10=%ymm3,<r10=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b8 & a1 # asm 1: vpand <b8=reg256#12,<a1=reg256#9,>r=reg256#8 # asm 2: vpand <b8=%ymm11,<a1=%ymm8,>r=%ymm7 vpand % ymm11, % ymm8, % ymm7 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#8,<r9=reg256#6,<r9=reg256#6 # asm 2: vpxor <r=%ymm7,<r9=%ymm5,<r9=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r8 = b8 & a0 # asm 1: vpand <b8=reg256#12,<a0=reg256#11,>r8=reg256#8 # asm 2: vpand <b8=%ymm11,<a0=%ymm10,>r8=%ymm7 vpand % ymm11, % ymm10, % ymm7 # qhasm: b7 = mem128[ input_2 + 112 ] x2 # asm 1: vbroadcasti128 112(<input_2=int64#3), >b7=reg256#12 # asm 2: vbroadcasti128 112(<input_2=%rdx), >b7=%ymm11 vbroadcasti128 112( % rdx), % ymm11 # qhasm: r = b7 & a5 # asm 1: vpand <b7=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b7=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#13,<r12=reg256#10,<r12=reg256#10 # asm 2: vpxor <r=%ymm12,<r12=%ymm9,<r12=%ymm9 vpxor % ymm12, % ymm9, % ymm9 # qhasm: mem256[ ptr + 384 ] = r12 # asm 1: vmovupd <r12=reg256#10,384(<ptr=int64#4) # asm 2: vmovupd <r12=%ymm9,384(<ptr=%rcx) vmovupd % ymm9, 384( % rcx) # qhasm: r = b7 & a4 # asm 1: vpand <b7=reg256#12,<a4=reg256#3,>r=reg256#10 # asm 2: vpand <b7=%ymm11,<a4=%ymm2,>r=%ymm9 vpand % ymm11, % ymm2, % ymm9 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#10,<r11=reg256#1,<r11=reg256#1 # asm 2: vpxor <r=%ymm9,<r11=%ymm0,<r11=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b7 & a3 # asm 1: vpand <b7=reg256#12,<a3=reg256#5,>r=reg256#10 # asm 2: vpand <b7=%ymm11,<a3=%ymm4,>r=%ymm9 vpand % ymm11, % ymm4, % ymm9 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#10,<r10=reg256#4,<r10=reg256#4 # asm 2: vpxor <r=%ymm9,<r10=%ymm3,<r10=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b7 & a2 # asm 1: vpand <b7=reg256#12,<a2=reg256#7,>r=reg256#10 # asm 2: vpand <b7=%ymm11,<a2=%ymm6,>r=%ymm9 vpand % ymm11, % ymm6, % ymm9 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#10,<r9=reg256#6,<r9=reg256#6 # asm 2: vpxor <r=%ymm9,<r9=%ymm5,<r9=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b7 & a1 # asm 1: vpand <b7=reg256#12,<a1=reg256#9,>r=reg256#10 # asm 2: vpand <b7=%ymm11,<a1=%ymm8,>r=%ymm9 vpand % ymm11, % ymm8, % ymm9 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#10,<r8=reg256#8,<r8=reg256#8 # asm 2: vpxor <r=%ymm9,<r8=%ymm7,<r8=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r7 = b7 & a0 # asm 1: vpand <b7=reg256#12,<a0=reg256#11,>r7=reg256#10 # asm 2: vpand <b7=%ymm11,<a0=%ymm10,>r7=%ymm9 vpand % ymm11, % ymm10, % ymm9 # qhasm: b6 = mem128[ input_2 + 96 ] x2 # asm 1: vbroadcasti128 
96(<input_2=int64#3), >b6=reg256#12 # asm 2: vbroadcasti128 96(<input_2=%rdx), >b6=%ymm11 vbroadcasti128 96( % rdx), % ymm11 # qhasm: r = b6 & a5 # asm 1: vpand <b6=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b6=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#13,<r11=reg256#1,<r11=reg256#1 # asm 2: vpxor <r=%ymm12,<r11=%ymm0,<r11=%ymm0 vpxor % ymm12, % ymm0, % ymm0 # qhasm: mem256[ ptr + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<ptr=int64#4) # asm 2: vmovupd <r11=%ymm0,352(<ptr=%rcx) vmovupd % ymm0, 352( % rcx) # qhasm: r = b6 & a4 # asm 1: vpand <b6=reg256#12,<a4=reg256#3,>r=reg256#1 # asm 2: vpand <b6=%ymm11,<a4=%ymm2,>r=%ymm0 vpand % ymm11, % ymm2, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#4,<r10=reg256#4 # asm 2: vpxor <r=%ymm0,<r10=%ymm3,<r10=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b6 & a3 # asm 1: vpand <b6=reg256#12,<a3=reg256#5,>r=reg256#1 # asm 2: vpand <b6=%ymm11,<a3=%ymm4,>r=%ymm0 vpand % ymm11, % ymm4, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#6,<r9=reg256#6 # asm 2: vpxor <r=%ymm0,<r9=%ymm5,<r9=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b6 & a2 # asm 1: vpand <b6=reg256#12,<a2=reg256#7,>r=reg256#1 # asm 2: vpand <b6=%ymm11,<a2=%ymm6,>r=%ymm0 vpand % ymm11, % ymm6, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#8,<r8=reg256#8 # asm 2: vpxor <r=%ymm0,<r8=%ymm7,<r8=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b6 & a1 # asm 1: vpand <b6=reg256#12,<a1=reg256#9,>r=reg256#1 # asm 2: vpand <b6=%ymm11,<a1=%ymm8,>r=%ymm0 vpand % ymm11, % ymm8, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#10,<r7=reg256#10 # asm 2: vpxor <r=%ymm0,<r7=%ymm9,<r7=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r6 = b6 & a0 # asm 1: vpand <b6=reg256#12,<a0=reg256#11,>r6=reg256#1 # asm 2: vpand <b6=%ymm11,<a0=%ymm10,>r6=%ymm0 vpand % ymm11, % ymm10, % ymm0 # qhasm: b5 = mem128[ input_2 + 80 ] x2 # asm 1: vbroadcasti128 80(<input_2=int64#3), >b5=reg256#12 # asm 2: vbroadcasti128 80(<input_2=%rdx), >b5=%ymm11 vbroadcasti128 80( % rdx), % ymm11 # qhasm: r = b5 & a5 # asm 1: vpand <b5=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b5=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#13,<r10=reg256#4,<r10=reg256#4 # asm 2: vpxor <r=%ymm12,<r10=%ymm3,<r10=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#4,320(<ptr=int64#4) # asm 2: vmovupd <r10=%ymm3,320(<ptr=%rcx) vmovupd % ymm3, 320( % rcx) # qhasm: r = b5 & a4 # asm 1: vpand <b5=reg256#12,<a4=reg256#3,>r=reg256#4 # asm 2: vpand <b5=%ymm11,<a4=%ymm2,>r=%ymm3 vpand % ymm11, % ymm2, % ymm3 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#4,<r9=reg256#6,<r9=reg256#6 # asm 2: vpxor <r=%ymm3,<r9=%ymm5,<r9=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b5 & a3 # asm 1: vpand <b5=reg256#12,<a3=reg256#5,>r=reg256#4 # asm 2: vpand <b5=%ymm11,<a3=%ymm4,>r=%ymm3 vpand % ymm11, % ymm4, % ymm3 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#4,<r8=reg256#8,<r8=reg256#8 # asm 2: vpxor <r=%ymm3,<r8=%ymm7,<r8=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b5 & a2 # asm 1: vpand <b5=reg256#12,<a2=reg256#7,>r=reg256#4 # asm 2: vpand <b5=%ymm11,<a2=%ymm6,>r=%ymm3 vpand % ymm11, % ymm6, % ymm3 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#4,<r7=reg256#10,<r7=reg256#10 # asm 2: vpxor <r=%ymm3,<r7=%ymm9,<r7=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b5 & a1 # asm 1: vpand <b5=reg256#12,<a1=reg256#9,>r=reg256#4 # asm 2: vpand 
<b5=%ymm11,<a1=%ymm8,>r=%ymm3 vpand % ymm11, % ymm8, % ymm3 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#4,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm3,<r6=%ymm0,<r6=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r5 = b5 & a0 # asm 1: vpand <b5=reg256#12,<a0=reg256#11,>r5=reg256#4 # asm 2: vpand <b5=%ymm11,<a0=%ymm10,>r5=%ymm3 vpand % ymm11, % ymm10, % ymm3 # qhasm: b4 = mem128[ input_2 + 64 ] x2 # asm 1: vbroadcasti128 64(<input_2=int64#3), >b4=reg256#12 # asm 2: vbroadcasti128 64(<input_2=%rdx), >b4=%ymm11 vbroadcasti128 64( % rdx), % ymm11 # qhasm: r = b4 & a5 # asm 1: vpand <b4=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b4=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#13,<r9=reg256#6,<r9=reg256#6 # asm 2: vpxor <r=%ymm12,<r9=%ymm5,<r9=%ymm5 vpxor % ymm12, % ymm5, % ymm5 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#6,288(<ptr=int64#4) # asm 2: vmovupd <r9=%ymm5,288(<ptr=%rcx) vmovupd % ymm5, 288( % rcx) # qhasm: r = b4 & a4 # asm 1: vpand <b4=reg256#12,<a4=reg256#3,>r=reg256#6 # asm 2: vpand <b4=%ymm11,<a4=%ymm2,>r=%ymm5 vpand % ymm11, % ymm2, % ymm5 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#6,<r8=reg256#8,<r8=reg256#8 # asm 2: vpxor <r=%ymm5,<r8=%ymm7,<r8=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b4 & a3 # asm 1: vpand <b4=reg256#12,<a3=reg256#5,>r=reg256#6 # asm 2: vpand <b4=%ymm11,<a3=%ymm4,>r=%ymm5 vpand % ymm11, % ymm4, % ymm5 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#6,<r7=reg256#10,<r7=reg256#10 # asm 2: vpxor <r=%ymm5,<r7=%ymm9,<r7=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b4 & a2 # asm 1: vpand <b4=reg256#12,<a2=reg256#7,>r=reg256#6 # asm 2: vpand <b4=%ymm11,<a2=%ymm6,>r=%ymm5 vpand % ymm11, % ymm6, % ymm5 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#6,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm5,<r6=%ymm0,<r6=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b4 & a1 # asm 1: vpand <b4=reg256#12,<a1=reg256#9,>r=reg256#6 # asm 2: vpand <b4=%ymm11,<a1=%ymm8,>r=%ymm5 vpand % ymm11, % ymm8, % ymm5 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#6,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm5,<r5=%ymm3,<r5=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r4 = b4 & a0 # asm 1: vpand <b4=reg256#12,<a0=reg256#11,>r4=reg256#6 # asm 2: vpand <b4=%ymm11,<a0=%ymm10,>r4=%ymm5 vpand % ymm11, % ymm10, % ymm5 # qhasm: b3 = mem128[ input_2 + 48 ] x2 # asm 1: vbroadcasti128 48(<input_2=int64#3), >b3=reg256#12 # asm 2: vbroadcasti128 48(<input_2=%rdx), >b3=%ymm11 vbroadcasti128 48( % rdx), % ymm11 # qhasm: r = b3 & a5 # asm 1: vpand <b3=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b3=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#13,<r8=reg256#8,<r8=reg256#8 # asm 2: vpxor <r=%ymm12,<r8=%ymm7,<r8=%ymm7 vpxor % ymm12, % ymm7, % ymm7 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#8,256(<ptr=int64#4) # asm 2: vmovupd <r8=%ymm7,256(<ptr=%rcx) vmovupd % ymm7, 256( % rcx) # qhasm: r = b3 & a4 # asm 1: vpand <b3=reg256#12,<a4=reg256#3,>r=reg256#8 # asm 2: vpand <b3=%ymm11,<a4=%ymm2,>r=%ymm7 vpand % ymm11, % ymm2, % ymm7 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#8,<r7=reg256#10,<r7=reg256#10 # asm 2: vpxor <r=%ymm7,<r7=%ymm9,<r7=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b3 & a3 # asm 1: vpand <b3=reg256#12,<a3=reg256#5,>r=reg256#8 # asm 2: vpand <b3=%ymm11,<a3=%ymm4,>r=%ymm7 vpand % ymm11, % ymm4, % ymm7 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#8,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm7,<r6=%ymm0,<r6=%ymm0 vpxor % ymm7, % ymm0, % 
ymm0 # qhasm: r = b3 & a2 # asm 1: vpand <b3=reg256#12,<a2=reg256#7,>r=reg256#8 # asm 2: vpand <b3=%ymm11,<a2=%ymm6,>r=%ymm7 vpand % ymm11, % ymm6, % ymm7 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#8,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm7,<r5=%ymm3,<r5=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b3 & a1 # asm 1: vpand <b3=reg256#12,<a1=reg256#9,>r=reg256#8 # asm 2: vpand <b3=%ymm11,<a1=%ymm8,>r=%ymm7 vpand % ymm11, % ymm8, % ymm7 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#8,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm7,<r4=%ymm5,<r4=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r3 = b3 & a0 # asm 1: vpand <b3=reg256#12,<a0=reg256#11,>r3=reg256#8 # asm 2: vpand <b3=%ymm11,<a0=%ymm10,>r3=%ymm7 vpand % ymm11, % ymm10, % ymm7 # qhasm: b2 = mem128[ input_2 + 32 ] x2 # asm 1: vbroadcasti128 32(<input_2=int64#3), >b2=reg256#12 # asm 2: vbroadcasti128 32(<input_2=%rdx), >b2=%ymm11 vbroadcasti128 32( % rdx), % ymm11 # qhasm: r = b2 & a5 # asm 1: vpand <b2=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b2=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#13,<r7=reg256#10,<r7=reg256#10 # asm 2: vpxor <r=%ymm12,<r7=%ymm9,<r7=%ymm9 vpxor % ymm12, % ymm9, % ymm9 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#10,224(<ptr=int64#4) # asm 2: vmovupd <r7=%ymm9,224(<ptr=%rcx) vmovupd % ymm9, 224( % rcx) # qhasm: r = b2 & a4 # asm 1: vpand <b2=reg256#12,<a4=reg256#3,>r=reg256#10 # asm 2: vpand <b2=%ymm11,<a4=%ymm2,>r=%ymm9 vpand % ymm11, % ymm2, % ymm9 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#10,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm9,<r6=%ymm0,<r6=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b2 & a3 # asm 1: vpand <b2=reg256#12,<a3=reg256#5,>r=reg256#10 # asm 2: vpand <b2=%ymm11,<a3=%ymm4,>r=%ymm9 vpand % ymm11, % ymm4, % ymm9 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#10,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm9,<r5=%ymm3,<r5=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b2 & a2 # asm 1: vpand <b2=reg256#12,<a2=reg256#7,>r=reg256#10 # asm 2: vpand <b2=%ymm11,<a2=%ymm6,>r=%ymm9 vpand % ymm11, % ymm6, % ymm9 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#10,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm9,<r4=%ymm5,<r4=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b2 & a1 # asm 1: vpand <b2=reg256#12,<a1=reg256#9,>r=reg256#10 # asm 2: vpand <b2=%ymm11,<a1=%ymm8,>r=%ymm9 vpand % ymm11, % ymm8, % ymm9 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#10,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm9,<r3=%ymm7,<r3=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r2 = b2 & a0 # asm 1: vpand <b2=reg256#12,<a0=reg256#11,>r2=reg256#10 # asm 2: vpand <b2=%ymm11,<a0=%ymm10,>r2=%ymm9 vpand % ymm11, % ymm10, % ymm9 # qhasm: b1 = mem128[ input_2 + 16 ] x2 # asm 1: vbroadcasti128 16(<input_2=int64#3), >b1=reg256#12 # asm 2: vbroadcasti128 16(<input_2=%rdx), >b1=%ymm11 vbroadcasti128 16( % rdx), % ymm11 # qhasm: r = b1 & a5 # asm 1: vpand <b1=reg256#12,<a5=reg256#2,>r=reg256#13 # asm 2: vpand <b1=%ymm11,<a5=%ymm1,>r=%ymm12 vpand % ymm11, % ymm1, % ymm12 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#13,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm12,<r6=%ymm0,<r6=%ymm0 vpxor % ymm12, % ymm0, % ymm0 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<ptr=int64#4) # asm 2: vmovupd <r6=%ymm0,192(<ptr=%rcx) vmovupd % ymm0, 192( % rcx) # qhasm: r = b1 & a4 # asm 1: vpand <b1=reg256#12,<a4=reg256#3,>r=reg256#1 # asm 2: vpand <b1=%ymm11,<a4=%ymm2,>r=%ymm0 vpand % ymm11, % ymm2, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor 
<r=reg256#1,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm0,<r5=%ymm3,<r5=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b1 & a3 # asm 1: vpand <b1=reg256#12,<a3=reg256#5,>r=reg256#1 # asm 2: vpand <b1=%ymm11,<a3=%ymm4,>r=%ymm0 vpand % ymm11, % ymm4, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm0,<r4=%ymm5,<r4=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b1 & a2 # asm 1: vpand <b1=reg256#12,<a2=reg256#7,>r=reg256#1 # asm 2: vpand <b1=%ymm11,<a2=%ymm6,>r=%ymm0 vpand % ymm11, % ymm6, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm0,<r3=%ymm7,<r3=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b1 & a1 # asm 1: vpand <b1=reg256#12,<a1=reg256#9,>r=reg256#1 # asm 2: vpand <b1=%ymm11,<a1=%ymm8,>r=%ymm0 vpand % ymm11, % ymm8, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm0,<r2=%ymm9,<r2=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r1 = b1 & a0 # asm 1: vpand <b1=reg256#12,<a0=reg256#11,>r1=reg256#1 # asm 2: vpand <b1=%ymm11,<a0=%ymm10,>r1=%ymm0 vpand % ymm11, % ymm10, % ymm0 # qhasm: b0 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b0=reg256#12 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b0=%ymm11 vbroadcasti128 0( % rdx), % ymm11 # qhasm: r = b0 & a5 # asm 1: vpand <b0=reg256#12,<a5=reg256#2,>r=reg256#2 # asm 2: vpand <b0=%ymm11,<a5=%ymm1,>r=%ymm1 vpand % ymm11, % ymm1, % ymm1 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#2,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm1,<r5=%ymm3,<r5=%ymm3 vpxor % ymm1, % ymm3, % ymm3 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#4) # asm 2: vmovupd <r5=%ymm3,160(<ptr=%rcx) vmovupd % ymm3, 160( % rcx) # qhasm: r = b0 & a4 # asm 1: vpand <b0=reg256#12,<a4=reg256#3,>r=reg256#2 # asm 2: vpand <b0=%ymm11,<a4=%ymm2,>r=%ymm1 vpand % ymm11, % ymm2, % ymm1 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#2,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm1,<r4=%ymm5,<r4=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r = b0 & a3 # asm 1: vpand <b0=reg256#12,<a3=reg256#5,>r=reg256#2 # asm 2: vpand <b0=%ymm11,<a3=%ymm4,>r=%ymm1 vpand % ymm11, % ymm4, % ymm1 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#2,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm1,<r3=%ymm7,<r3=%ymm7 vpxor % ymm1, % ymm7, % ymm7 # qhasm: r = b0 & a2 # asm 1: vpand <b0=reg256#12,<a2=reg256#7,>r=reg256#2 # asm 2: vpand <b0=%ymm11,<a2=%ymm6,>r=%ymm1 vpand % ymm11, % ymm6, % ymm1 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#2,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm1,<r2=%ymm9,<r2=%ymm9 vpxor % ymm1, % ymm9, % ymm9 # qhasm: r = b0 & a1 # asm 1: vpand <b0=reg256#12,<a1=reg256#9,>r=reg256#2 # asm 2: vpand <b0=%ymm11,<a1=%ymm8,>r=%ymm1 vpand % ymm11, % ymm8, % ymm1 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#2,<r1=reg256#1,<r1=reg256#1 # asm 2: vpxor <r=%ymm1,<r1=%ymm0,<r1=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: r0 = b0 & a0 # asm 1: vpand <b0=reg256#12,<a0=reg256#11,>r0=reg256#2 # asm 2: vpand <b0=%ymm11,<a0=%ymm10,>r0=%ymm1 vpand % ymm11, % ymm10, % ymm1 # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#6,128(<ptr=int64#4) # asm 2: vmovupd <r4=%ymm5,128(<ptr=%rcx) vmovupd % ymm5, 128( % rcx) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#8,96(<ptr=int64#4) # asm 2: vmovupd <r3=%ymm7,96(<ptr=%rcx) vmovupd % ymm7, 96( % rcx) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#10,64(<ptr=int64#4) # asm 2: vmovupd <r2=%ymm9,64(<ptr=%rcx) vmovupd % ymm9, 64( % 
rcx) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<ptr=int64#4) # asm 2: vmovupd <r1=%ymm0,32(<ptr=%rcx) vmovupd % ymm0, 32( % rcx) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#2,0(<ptr=int64#4) # asm 2: vmovupd <r0=%ymm1,0(<ptr=%rcx) vmovupd % ymm1, 0( % rcx) # qhasm: vzeroupper vzeroupper # qhasm: h22 = mem128[ ptr + 528 ] # asm 1: movdqu 528(<ptr=int64#4),>h22=reg128#1 # asm 2: movdqu 528(<ptr=%rcx),>h22=%xmm0 movdqu 528( % rcx), % xmm0 # qhasm: h13 = h22 # asm 1: movdqa <h22=reg128#1,>h13=reg128#2 # asm 2: movdqa <h22=%xmm0,>h13=%xmm1 movdqa % xmm0, % xmm1 # qhasm: h10 = h22 # asm 1: movdqa <h22=reg128#1,>h10=reg128#1 # asm 2: movdqa <h22=%xmm0,>h10=%xmm0 movdqa % xmm0, % xmm0 # qhasm: h21 = mem128[ ptr + 496 ] # asm 1: movdqu 496(<ptr=int64#4),>h21=reg128#3 # asm 2: movdqu 496(<ptr=%rcx),>h21=%xmm2 movdqu 496( % rcx), % xmm2 # qhasm: h12 = h21 # asm 1: movdqa <h21=reg128#3,>h12=reg128#4 # asm 2: movdqa <h21=%xmm2,>h12=%xmm3 movdqa % xmm2, % xmm3 # qhasm: h9 = h21 # asm 1: movdqa <h21=reg128#3,>h9=reg128#3 # asm 2: movdqa <h21=%xmm2,>h9=%xmm2 movdqa % xmm2, % xmm2 # qhasm: h20 = mem128[ ptr + 464 ] # asm 1: movdqu 464(<ptr=int64#4),>h20=reg128#5 # asm 2: movdqu 464(<ptr=%rcx),>h20=%xmm4 movdqu 464( % rcx), % xmm4 # qhasm: h11 = h20 # asm 1: movdqa <h20=reg128#5,>h11=reg128#6 # asm 2: movdqa <h20=%xmm4,>h11=%xmm5 movdqa % xmm4, % xmm5 # qhasm: h8 = h20 # asm 1: movdqa <h20=reg128#5,>h8=reg128#5 # asm 2: movdqa <h20=%xmm4,>h8=%xmm4 movdqa % xmm4, % xmm4 # qhasm: h19 = mem128[ ptr + 432 ] # asm 1: movdqu 432(<ptr=int64#4),>h19=reg128#7 # asm 2: movdqu 432(<ptr=%rcx),>h19=%xmm6 movdqu 432( % rcx), % xmm6 # qhasm: h10 = h10 ^ h19 # asm 1: vpxor <h19=reg128#7,<h10=reg128#1,>h10=reg128#1 # asm 2: vpxor <h19=%xmm6,<h10=%xmm0,>h10=%xmm0 vpxor % xmm6, % xmm0, % xmm0 # qhasm: h7 = h19 # asm 1: movdqa <h19=reg128#7,>h7=reg128#7 # asm 2: movdqa <h19=%xmm6,>h7=%xmm6 movdqa % xmm6, % xmm6 # qhasm: h18 = mem128[ ptr + 400 ] # asm 1: movdqu 400(<ptr=int64#4),>h18=reg128#8 # asm 2: movdqu 400(<ptr=%rcx),>h18=%xmm7 movdqu 400( % rcx), % xmm7 # qhasm: h9 = h9 ^ h18 # asm 1: vpxor <h18=reg128#8,<h9=reg128#3,>h9=reg128#3 # asm 2: vpxor <h18=%xmm7,<h9=%xmm2,>h9=%xmm2 vpxor % xmm7, % xmm2, % xmm2 # qhasm: h6 = h18 # asm 1: movdqa <h18=reg128#8,>h6=reg128#8 # asm 2: movdqa <h18=%xmm7,>h6=%xmm7 movdqa % xmm7, % xmm7 # qhasm: h17 = mem128[ ptr + 368 ] # asm 1: movdqu 368(<ptr=int64#4),>h17=reg128#9 # asm 2: movdqu 368(<ptr=%rcx),>h17=%xmm8 movdqu 368( % rcx), % xmm8 # qhasm: h8 = h8 ^ h17 # asm 1: vpxor <h17=reg128#9,<h8=reg128#5,>h8=reg128#5 # asm 2: vpxor <h17=%xmm8,<h8=%xmm4,>h8=%xmm4 vpxor % xmm8, % xmm4, % xmm4 # qhasm: h5 = h17 # asm 1: movdqa <h17=reg128#9,>h5=reg128#9 # asm 2: movdqa <h17=%xmm8,>h5=%xmm8 movdqa % xmm8, % xmm8 # qhasm: h16 = mem128[ ptr + 336 ] # asm 1: movdqu 336(<ptr=int64#4),>h16=reg128#10 # asm 2: movdqu 336(<ptr=%rcx),>h16=%xmm9 movdqu 336( % rcx), % xmm9 # qhasm: h16 = h16 ^ mem128[ ptr + 512 ] # asm 1: vpxor 512(<ptr=int64#4),<h16=reg128#10,>h16=reg128#10 # asm 2: vpxor 512(<ptr=%rcx),<h16=%xmm9,>h16=%xmm9 vpxor 512( % rcx), % xmm9, % xmm9 # qhasm: h7 = h7 ^ h16 # asm 1: vpxor <h16=reg128#10,<h7=reg128#7,>h7=reg128#7 # asm 2: vpxor <h16=%xmm9,<h7=%xmm6,>h7=%xmm6 vpxor % xmm9, % xmm6, % xmm6 # qhasm: h4 = h16 # asm 1: movdqa <h16=reg128#10,>h4=reg128#10 # asm 2: movdqa <h16=%xmm9,>h4=%xmm9 movdqa % xmm9, % xmm9 # qhasm: h15 = mem128[ ptr + 304 ] # asm 1: movdqu 304(<ptr=int64#4),>h15=reg128#11 # asm 2: movdqu 304(<ptr=%rcx),>h15=%xmm10 movdqu 304( 
% rcx), % xmm10 # qhasm: h15 = h15 ^ mem128[ ptr + 480 ] # asm 1: vpxor 480(<ptr=int64#4),<h15=reg128#11,>h15=reg128#11 # asm 2: vpxor 480(<ptr=%rcx),<h15=%xmm10,>h15=%xmm10 vpxor 480( % rcx), % xmm10, % xmm10 # qhasm: h6 = h6 ^ h15 # asm 1: vpxor <h15=reg128#11,<h6=reg128#8,>h6=reg128#8 # asm 2: vpxor <h15=%xmm10,<h6=%xmm7,>h6=%xmm7 vpxor % xmm10, % xmm7, % xmm7 # qhasm: h3 = h15 # asm 1: movdqa <h15=reg128#11,>h3=reg128#11 # asm 2: movdqa <h15=%xmm10,>h3=%xmm10 movdqa % xmm10, % xmm10 # qhasm: h14 = mem128[ ptr + 272 ] # asm 1: movdqu 272(<ptr=int64#4),>h14=reg128#12 # asm 2: movdqu 272(<ptr=%rcx),>h14=%xmm11 movdqu 272( % rcx), % xmm11 # qhasm: h14 = h14 ^ mem128[ ptr + 448 ] # asm 1: vpxor 448(<ptr=int64#4),<h14=reg128#12,>h14=reg128#12 # asm 2: vpxor 448(<ptr=%rcx),<h14=%xmm11,>h14=%xmm11 vpxor 448( % rcx), % xmm11, % xmm11 # qhasm: h5 = h5 ^ h14 # asm 1: vpxor <h14=reg128#12,<h5=reg128#9,>h5=reg128#9 # asm 2: vpxor <h14=%xmm11,<h5=%xmm8,>h5=%xmm8 vpxor % xmm11, % xmm8, % xmm8 # qhasm: h2 = h14 # asm 1: movdqa <h14=reg128#12,>h2=reg128#12 # asm 2: movdqa <h14=%xmm11,>h2=%xmm11 movdqa % xmm11, % xmm11 # qhasm: h13 = h13 ^ mem128[ ptr + 240 ] # asm 1: vpxor 240(<ptr=int64#4),<h13=reg128#2,>h13=reg128#2 # asm 2: vpxor 240(<ptr=%rcx),<h13=%xmm1,>h13=%xmm1 vpxor 240( % rcx), % xmm1, % xmm1 # qhasm: h13 = h13 ^ mem128[ ptr + 416 ] # asm 1: vpxor 416(<ptr=int64#4),<h13=reg128#2,>h13=reg128#2 # asm 2: vpxor 416(<ptr=%rcx),<h13=%xmm1,>h13=%xmm1 vpxor 416( % rcx), % xmm1, % xmm1 # qhasm: h4 = h4 ^ h13 # asm 1: vpxor <h13=reg128#2,<h4=reg128#10,>h4=reg128#10 # asm 2: vpxor <h13=%xmm1,<h4=%xmm9,>h4=%xmm9 vpxor % xmm1, % xmm9, % xmm9 # qhasm: h1 = h13 # asm 1: movdqa <h13=reg128#2,>h1=reg128#2 # asm 2: movdqa <h13=%xmm1,>h1=%xmm1 movdqa % xmm1, % xmm1 # qhasm: h12 = h12 ^ mem128[ ptr + 208 ] # asm 1: vpxor 208(<ptr=int64#4),<h12=reg128#4,>h12=reg128#4 # asm 2: vpxor 208(<ptr=%rcx),<h12=%xmm3,>h12=%xmm3 vpxor 208( % rcx), % xmm3, % xmm3 # qhasm: h12 = h12 ^ mem128[ ptr + 384 ] # asm 1: vpxor 384(<ptr=int64#4),<h12=reg128#4,>h12=reg128#4 # asm 2: vpxor 384(<ptr=%rcx),<h12=%xmm3,>h12=%xmm3 vpxor 384( % rcx), % xmm3, % xmm3 # qhasm: h3 = h3 ^ h12 # asm 1: vpxor <h12=reg128#4,<h3=reg128#11,>h3=reg128#11 # asm 2: vpxor <h12=%xmm3,<h3=%xmm10,>h3=%xmm10 vpxor % xmm3, % xmm10, % xmm10 # qhasm: h0 = h12 # asm 1: movdqa <h12=reg128#4,>h0=reg128#4 # asm 2: movdqa <h12=%xmm3,>h0=%xmm3 movdqa % xmm3, % xmm3 # qhasm: h11 = h11 ^ mem128[ ptr + 352 ] # asm 1: vpxor 352(<ptr=int64#4),<h11=reg128#6,>h11=reg128#6 # asm 2: vpxor 352(<ptr=%rcx),<h11=%xmm5,>h11=%xmm5 vpxor 352( % rcx), % xmm5, % xmm5 # qhasm: h11 = h11 ^ mem128[ ptr + 176 ] # asm 1: vpxor 176(<ptr=int64#4),<h11=reg128#6,>h11=reg128#6 # asm 2: vpxor 176(<ptr=%rcx),<h11=%xmm5,>h11=%xmm5 vpxor 176( % rcx), % xmm5, % xmm5 # qhasm: mem128[ input_0 + 176 ] = h11 # asm 1: movdqu <h11=reg128#6,176(<input_0=int64#1) # asm 2: movdqu <h11=%xmm5,176(<input_0=%rdi) movdqu % xmm5, 176( % rdi) # qhasm: h10 = h10 ^ mem128[ ptr + 320 ] # asm 1: vpxor 320(<ptr=int64#4),<h10=reg128#1,>h10=reg128#1 # asm 2: vpxor 320(<ptr=%rcx),<h10=%xmm0,>h10=%xmm0 vpxor 320( % rcx), % xmm0, % xmm0 # qhasm: h10 = h10 ^ mem128[ ptr + 144 ] # asm 1: vpxor 144(<ptr=int64#4),<h10=reg128#1,>h10=reg128#1 # asm 2: vpxor 144(<ptr=%rcx),<h10=%xmm0,>h10=%xmm0 vpxor 144( % rcx), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 160 ] = h10 # asm 1: movdqu <h10=reg128#1,160(<input_0=int64#1) # asm 2: movdqu <h10=%xmm0,160(<input_0=%rdi) movdqu % xmm0, 160( % rdi) # qhasm: h9 = h9 ^ mem128[ ptr + 288 ] # asm 
1: vpxor 288(<ptr=int64#4),<h9=reg128#3,>h9=reg128#1 # asm 2: vpxor 288(<ptr=%rcx),<h9=%xmm2,>h9=%xmm0 vpxor 288( % rcx), % xmm2, % xmm0 # qhasm: h9 = h9 ^ mem128[ ptr + 112 ] # asm 1: vpxor 112(<ptr=int64#4),<h9=reg128#1,>h9=reg128#1 # asm 2: vpxor 112(<ptr=%rcx),<h9=%xmm0,>h9=%xmm0 vpxor 112( % rcx), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 144 ] = h9 # asm 1: movdqu <h9=reg128#1,144(<input_0=int64#1) # asm 2: movdqu <h9=%xmm0,144(<input_0=%rdi) movdqu % xmm0, 144( % rdi) # qhasm: h8 = h8 ^ mem128[ ptr + 256 ] # asm 1: vpxor 256(<ptr=int64#4),<h8=reg128#5,>h8=reg128#1 # asm 2: vpxor 256(<ptr=%rcx),<h8=%xmm4,>h8=%xmm0 vpxor 256( % rcx), % xmm4, % xmm0 # qhasm: h8 = h8 ^ mem128[ ptr + 80 ] # asm 1: vpxor 80(<ptr=int64#4),<h8=reg128#1,>h8=reg128#1 # asm 2: vpxor 80(<ptr=%rcx),<h8=%xmm0,>h8=%xmm0 vpxor 80( % rcx), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 128 ] = h8 # asm 1: movdqu <h8=reg128#1,128(<input_0=int64#1) # asm 2: movdqu <h8=%xmm0,128(<input_0=%rdi) movdqu % xmm0, 128( % rdi) # qhasm: h7 = h7 ^ mem128[ ptr + 224 ] # asm 1: vpxor 224(<ptr=int64#4),<h7=reg128#7,>h7=reg128#1 # asm 2: vpxor 224(<ptr=%rcx),<h7=%xmm6,>h7=%xmm0 vpxor 224( % rcx), % xmm6, % xmm0 # qhasm: h7 = h7 ^ mem128[ ptr + 48 ] # asm 1: vpxor 48(<ptr=int64#4),<h7=reg128#1,>h7=reg128#1 # asm 2: vpxor 48(<ptr=%rcx),<h7=%xmm0,>h7=%xmm0 vpxor 48( % rcx), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 112 ] = h7 # asm 1: movdqu <h7=reg128#1,112(<input_0=int64#1) # asm 2: movdqu <h7=%xmm0,112(<input_0=%rdi) movdqu % xmm0, 112( % rdi) # qhasm: h6 = h6 ^ mem128[ ptr + 192 ] # asm 1: vpxor 192(<ptr=int64#4),<h6=reg128#8,>h6=reg128#1 # asm 2: vpxor 192(<ptr=%rcx),<h6=%xmm7,>h6=%xmm0 vpxor 192( % rcx), % xmm7, % xmm0 # qhasm: h6 = h6 ^ mem128[ ptr + 16 ] # asm 1: vpxor 16(<ptr=int64#4),<h6=reg128#1,>h6=reg128#1 # asm 2: vpxor 16(<ptr=%rcx),<h6=%xmm0,>h6=%xmm0 vpxor 16( % rcx), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 96 ] = h6 # asm 1: movdqu <h6=reg128#1,96(<input_0=int64#1) # asm 2: movdqu <h6=%xmm0,96(<input_0=%rdi) movdqu % xmm0, 96( % rdi) # qhasm: h5 = h5 ^ mem128[ ptr + 160 ] # asm 1: vpxor 160(<ptr=int64#4),<h5=reg128#9,>h5=reg128#1 # asm 2: vpxor 160(<ptr=%rcx),<h5=%xmm8,>h5=%xmm0 vpxor 160( % rcx), % xmm8, % xmm0 # qhasm: mem128[ input_0 + 80 ] = h5 # asm 1: movdqu <h5=reg128#1,80(<input_0=int64#1) # asm 2: movdqu <h5=%xmm0,80(<input_0=%rdi) movdqu % xmm0, 80( % rdi) # qhasm: h4 = h4 ^ mem128[ ptr + 128 ] # asm 1: vpxor 128(<ptr=int64#4),<h4=reg128#10,>h4=reg128#1 # asm 2: vpxor 128(<ptr=%rcx),<h4=%xmm9,>h4=%xmm0 vpxor 128( % rcx), % xmm9, % xmm0 # qhasm: mem128[ input_0 + 64 ] = h4 # asm 1: movdqu <h4=reg128#1,64(<input_0=int64#1) # asm 2: movdqu <h4=%xmm0,64(<input_0=%rdi) movdqu % xmm0, 64( % rdi) # qhasm: h3 = h3 ^ mem128[ ptr + 96 ] # asm 1: vpxor 96(<ptr=int64#4),<h3=reg128#11,>h3=reg128#1 # asm 2: vpxor 96(<ptr=%rcx),<h3=%xmm10,>h3=%xmm0 vpxor 96( % rcx), % xmm10, % xmm0 # qhasm: mem128[ input_0 + 48 ] = h3 # asm 1: movdqu <h3=reg128#1,48(<input_0=int64#1) # asm 2: movdqu <h3=%xmm0,48(<input_0=%rdi) movdqu % xmm0, 48( % rdi) # qhasm: h2 = h2 ^ mem128[ ptr + 64 ] # asm 1: vpxor 64(<ptr=int64#4),<h2=reg128#12,>h2=reg128#1 # asm 2: vpxor 64(<ptr=%rcx),<h2=%xmm11,>h2=%xmm0 vpxor 64( % rcx), % xmm11, % xmm0 # qhasm: mem128[ input_0 + 32 ] = h2 # asm 1: movdqu <h2=reg128#1,32(<input_0=int64#1) # asm 2: movdqu <h2=%xmm0,32(<input_0=%rdi) movdqu % xmm0, 32( % rdi) # qhasm: h1 = h1 ^ mem128[ ptr + 32 ] # asm 1: vpxor 32(<ptr=int64#4),<h1=reg128#2,>h1=reg128#1 # asm 2: vpxor 32(<ptr=%rcx),<h1=%xmm1,>h1=%xmm0 vpxor 32( % 
rcx), % xmm1, % xmm0 # qhasm: mem128[ input_0 + 16 ] = h1 # asm 1: movdqu <h1=reg128#1,16(<input_0=int64#1) # asm 2: movdqu <h1=%xmm0,16(<input_0=%rdi) movdqu % xmm0, 16( % rdi) # qhasm: h0 = h0 ^ mem128[ ptr + 0 ] # asm 1: vpxor 0(<ptr=int64#4),<h0=reg128#4,>h0=reg128#1 # asm 2: vpxor 0(<ptr=%rcx),<h0=%xmm3,>h0=%xmm0 vpxor 0( % rcx), % xmm3, % xmm0 # qhasm: mem128[ input_0 + 0 ] = h0 # asm 1: movdqu <h0=reg128#1,0(<input_0=int64#1) # asm 2: movdqu <h0=%xmm0,0(<input_0=%rdi) movdqu % xmm0, 0( % rdi) # qhasm: return add % r11, % rsp ret
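Mathematically, vec128_mul_asm computes a bitsliced product of two GF(2^12) elements held as 12 limbs of 128-bit masks: a schoolbook multiply with AND as coefficient-multiply and XOR as coefficient-add (the asm splits the 12x12 limb product into two 6x12 halves across the ymm lanes and recombines them through the stack buffer), followed by a polynomial reduction. The fold pattern visible above, h22 into h13 and h10 down through h12 into h3 and h0, matches reduction modulo x^12 + x^3 + 1. A reference sketch under those assumptions, with illustrative names throughout:

#include <stdint.h>
#include <string.h>

typedef struct { uint64_t u[2]; } vec128;  /* stand-in for one 128-bit lane */

static vec128 v_and(vec128 x, vec128 y)
{
    vec128 r = {{ x.u[0] & y.u[0], x.u[1] & y.u[1] }};
    return r;
}

static void v_xor_acc(vec128 *x, vec128 y)
{
    x->u[0] ^= y.u[0];
    x->u[1] ^= y.u[1];
}

static void vec128_mul_ref(vec128 h[12], const vec128 a[12], const vec128 b[12])
{
    vec128 r[23];
    memset(r, 0, sizeof r);
    /* schoolbook product in GF(2)[x]: AND multiplies, XOR accumulates */
    for (int i = 0; i < 12; i++)
        for (int j = 0; j < 12; j++)
            v_xor_acc(&r[i + j], v_and(a[i], b[j]));
    /* reduce modulo x^12 + x^3 + 1, i.e. x^k = x^(k-9) + x^(k-12) */
    for (int k = 22; k >= 12; k--) {
        v_xor_acc(&r[k - 9], r[k]);
        v_xor_acc(&r[k - 12], r[k]);
    }
    memcpy(h, r, 12 * sizeof(vec128));
}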
mktmansour/MKT-KSA-Geolocation-Security
7,454
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864f/avx2/vec_reduce_asm.S
#include "namespace.h" #define vec_reduce_asm CRYPTO_NAMESPACE(vec_reduce_asm) #define _vec_reduce_asm _CRYPTO_NAMESPACE(vec_reduce_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 t # qhasm: int64 c # qhasm: int64 r # qhasm: enter vec_reduce_asm .p2align 5 .global _vec_reduce_asm .global vec_reduce_asm _vec_reduce_asm: vec_reduce_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: r = 0 # asm 1: mov $0,>r=int64#7 # asm 2: mov $0,>r=%rax mov $0, % rax # qhasm: t = mem64[ input_0 + 88 ] # asm 1: movq 88(<input_0=int64#1),>t=int64#2 # asm 2: movq 88(<input_0=%rdi),>t=%rsi movq 88( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 80 ] # asm 1: movq 80(<input_0=int64#1),>t=int64#2 # asm 2: movq 80(<input_0=%rdi),>t=%rsi movq 80( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 72 ] # asm 1: movq 72(<input_0=int64#1),>t=int64#2 # asm 2: movq 72(<input_0=%rdi),>t=%rsi movq 72( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 64 ] # asm 1: movq 64(<input_0=int64#1),>t=int64#2 # asm 2: movq 64(<input_0=%rdi),>t=%rsi movq 64( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 56 ] # asm 1: movq 56(<input_0=int64#1),>t=int64#2 # asm 2: movq 56(<input_0=%rdi),>t=%rsi movq 56( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 48 ] # asm 1: movq 48(<input_0=int64#1),>t=int64#2 # asm 2: movq 
48(<input_0=%rdi),>t=%rsi movq 48( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 40 ] # asm 1: movq 40(<input_0=int64#1),>t=int64#2 # asm 2: movq 40(<input_0=%rdi),>t=%rsi movq 40( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 32 ] # asm 1: movq 32(<input_0=int64#1),>t=int64#2 # asm 2: movq 32(<input_0=%rdi),>t=%rsi movq 32( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 24 ] # asm 1: movq 24(<input_0=int64#1),>t=int64#2 # asm 2: movq 24(<input_0=%rdi),>t=%rsi movq 24( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 16 ] # asm 1: movq 16(<input_0=int64#1),>t=int64#2 # asm 2: movq 16(<input_0=%rdi),>t=%rsi movq 16( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>t=int64#2 # asm 2: movq 8(<input_0=%rdi),>t=%rsi movq 8( % rdi), % rsi # qhasm: c = count(t) # asm 1: popcnt <t=int64#2, >c=int64#2 # asm 2: popcnt <t=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>t=int64#1 # asm 2: movq 0(<input_0=%rdi),>t=%rdi movq 0( % rdi), % rdi # qhasm: c = count(t) # asm 1: popcnt <t=int64#1, >c=int64#1 # asm 2: popcnt <t=%rdi, >c=%rdi popcnt % rdi, % rdi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#1d # asm 2: and $1,<c=%edi and $1, % edi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#1,<r=int64#7 # asm 2: or 
<c=%rdi,<r=%rax or % rdi, % rax # qhasm: return r add % r11, % rsp ret
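For reference, a scalar C model of what vec_reduce_asm computes may help when reading the qhasm listing above: each of the twelve 64-bit limbs of a bitsliced GF(2^12) element is reduced to its parity (popcnt, then and $1), and the twelve parity bits are packed into one word, walking from limb 11 (offset 88) down to limb 0 so that limb 0 ends in the least significant bit. A minimal sketch, assuming GCC/Clang builtins; the function name and 12-limb signature are illustrative, not taken from this file:

#include <stdint.h>

/* Parity-reduce 12 bitsliced limbs to one 12-bit field element.
   Mirrors the popcnt / and $1 / shl / or chain in vec_reduce_asm. */
static uint16_t vec_reduce(const uint64_t v[12]) {
    uint16_t r = 0;
    for (int i = 11; i >= 0; i--) {                       /* asm walks offsets 88 down to 0 */
        r = (uint16_t)(r << 1);                           /* shl $1,%rax */
        r |= (uint16_t)(__builtin_popcountll(v[i]) & 1);  /* popcnt; and $1; or */
    }
    return r;
}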
mktmansour/MKT-KSA-Geolocation-Security
14,105
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864f/avx2/syndrome_asm.S
#include "namespace.h" #define syndrome_asm CRYPTO_NAMESPACE(syndrome_asm) #define _syndrome_asm _CRYPTO_NAMESPACE(syndrome_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 b64 # qhasm: int64 synd # qhasm: int64 addr # qhasm: int64 c # qhasm: int64 c_all # qhasm: int64 row # qhasm: int64 p # qhasm: int64 e # qhasm: int64 s # qhasm: reg256 pp # qhasm: reg256 ee # qhasm: reg256 ss # qhasm: int64 buf_ptr # qhasm: stack256 buf # qhasm: enter syndrome_asm .p2align 5 .global _syndrome_asm .global syndrome_asm _syndrome_asm: syndrome_asm: mov % rsp, % r11 and $31, % r11 add $32, % r11 sub % r11, % rsp # qhasm: input_1 += 260780 # asm 1: add $260780,<input_1=int64#2 # asm 2: add $260780,<input_1=%rsi add $260780, % rsi # qhasm: buf_ptr = &buf # asm 1: leaq <buf=stack256#1,>buf_ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>buf_ptr=%rcx leaq 0( % rsp), % rcx # qhasm: row = 768 # asm 1: mov $768,>row=int64#5 # asm 2: mov $768,>row=%r8 mov $768, % r8 # qhasm: loop: ._loop: # qhasm: row -= 1 # asm 1: sub $1,<row=int64#5 # asm 2: sub $1,<row=%r8 sub $1, % r8 # qhasm: ss = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>ss=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>ss=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: ee = mem256[ input_2 + 96 ] # asm 1: vmovupd 96(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 96(<input_2=%rdx),>ee=%ymm1 vmovupd 96( % rdx), % ymm1 # qhasm: ss &= ee # asm 1: vpand <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpand <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpand % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 32(<input_1=%rsi),>pp=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 128 ] # asm 1: vmovupd 128(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 128(<input_2=%rdx),>ee=%ymm2 vmovupd 128( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 64(<input_1=%rsi),>pp=%ymm1 vmovupd 64( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 160 ] # asm 1: vmovupd 160(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 160(<input_2=%rdx),>ee=%ymm2 vmovupd 160( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 96(<input_1=%rsi),>pp=%ymm1 vmovupd 96( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 192 ] # asm 1: vmovupd 192(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 192(<input_2=%rdx),>ee=%ymm2 vmovupd 192( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % 
ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 128(<input_1=%rsi),>pp=%ymm1 vmovupd 128( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 224 ] # asm 1: vmovupd 224(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 224(<input_2=%rdx),>ee=%ymm2 vmovupd 224( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 160(<input_1=%rsi),>pp=%ymm1 vmovupd 160( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 256 ] # asm 1: vmovupd 256(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 256(<input_2=%rdx),>ee=%ymm2 vmovupd 256( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 192(<input_1=%rsi),>pp=%ymm1 vmovupd 192( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 288 ] # asm 1: vmovupd 288(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 288(<input_2=%rdx),>ee=%ymm2 vmovupd 288( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 224(<input_1=%rsi),>pp=%ymm1 vmovupd 224( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 320 ] # asm 1: vmovupd 320(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 320(<input_2=%rdx),>ee=%ymm2 vmovupd 320( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 256(<input_1=%rsi),>pp=%ymm1 vmovupd 256( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 352 ] # asm 1: vmovupd 352(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 352(<input_2=%rdx),>ee=%ymm2 vmovupd 352( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 288(<input_1=%rsi),>pp=%ymm1 vmovupd 288( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 384 ] # asm 1: vmovupd 384(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 384(<input_2=%rdx),>ee=%ymm2 
vmovupd 384( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: buf = ss # asm 1: vmovapd <ss=reg256#1,>buf=stack256#1 # asm 2: vmovapd <ss=%ymm0,>buf=0(%rsp) vmovapd % ymm0, 0( % rsp) # qhasm: s = *(uint64 *)(input_1 + 320) # asm 1: movq 320(<input_1=int64#2),>s=int64#6 # asm 2: movq 320(<input_1=%rsi),>s=%r9 movq 320( % rsi), % r9 # qhasm: e = *(uint64 *)(input_2 + 416) # asm 1: movq 416(<input_2=int64#3),>e=int64#7 # asm 2: movq 416(<input_2=%rdx),>e=%rax movq 416( % rdx), % rax # qhasm: s &= e # asm 1: and <e=int64#7,<s=int64#6 # asm 2: and <e=%rax,<s=%r9 and % rax, % r9 # qhasm: p = *(uint64 *)(input_1 + 328) # asm 1: movq 328(<input_1=int64#2),>p=int64#7 # asm 2: movq 328(<input_1=%rsi),>p=%rax movq 328( % rsi), % rax # qhasm: e = *(uint64 *)(input_2 + 424) # asm 1: movq 424(<input_2=int64#3),>e=int64#8 # asm 2: movq 424(<input_2=%rdx),>e=%r10 movq 424( % rdx), % r10 # qhasm: p &= e # asm 1: and <e=int64#8,<p=int64#7 # asm 2: and <e=%r10,<p=%rax and % r10, % rax # qhasm: s ^= p # asm 1: xor <p=int64#7,<s=int64#6 # asm 2: xor <p=%rax,<s=%r9 xor % rax, % r9 # qhasm: p = *(uint32 *)(input_1 + 336) # asm 1: movl 336(<input_1=int64#2),>p=int64#7d # asm 2: movl 336(<input_1=%rsi),>p=%eax movl 336( % rsi), % eax # qhasm: e = *(uint32 *)(input_2 + 432) # asm 1: movl 432(<input_2=int64#3),>e=int64#8d # asm 2: movl 432(<input_2=%rdx),>e=%r10d movl 432( % rdx), % r10d # qhasm: p &= e # asm 1: and <e=int64#8,<p=int64#7 # asm 2: and <e=%r10,<p=%rax and % r10, % rax # qhasm: s ^= p # asm 1: xor <p=int64#7,<s=int64#6 # asm 2: xor <p=%rax,<s=%r9 xor % rax, % r9 # qhasm: c_all = count(s) # asm 1: popcnt <s=int64#6, >c_all=int64#6 # asm 2: popcnt <s=%r9, >c_all=%r9 popcnt % r9, % r9 # qhasm: b64 = mem64[ buf_ptr + 0 ] # asm 1: movq 0(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 0(<buf_ptr=%rcx),>b64=%rax movq 0( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 8 ] # asm 1: movq 8(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 8(<buf_ptr=%rcx),>b64=%rax movq 8( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 16 ] # asm 1: movq 16(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 16(<buf_ptr=%rcx),>b64=%rax movq 16( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 24 ] # asm 1: movq 24(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 24(<buf_ptr=%rcx),>b64=%rax movq 24( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: addr = row # asm 1: mov <row=int64#5,>addr=int64#7 # asm 2: mov 
<row=%r8,>addr=%rax mov % r8, % rax # qhasm: (uint64) addr >>= 3 # asm 1: shr $3,<addr=int64#7 # asm 2: shr $3,<addr=%rax shr $3, % rax # qhasm: addr += input_0 # asm 1: add <input_0=int64#1,<addr=int64#7 # asm 2: add <input_0=%rdi,<addr=%rax add % rdi, % rax # qhasm: synd = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#7),>synd=int64#8 # asm 2: movzbq 0(<addr=%rax),>synd=%r10 movzbq 0( % rax), % r10 # qhasm: synd <<= 1 # asm 1: shl $1,<synd=int64#8 # asm 2: shl $1,<synd=%r10 shl $1, % r10 # qhasm: (uint32) c_all &= 1 # asm 1: and $1,<c_all=int64#6d # asm 2: and $1,<c_all=%r9d and $1, % r9d # qhasm: synd |= c_all # asm 1: or <c_all=int64#6,<synd=int64#8 # asm 2: or <c_all=%r9,<synd=%r10 or % r9, % r10 # qhasm: *(uint8 *) (addr + 0) = synd # asm 1: movb <synd=int64#8b,0(<addr=int64#7) # asm 2: movb <synd=%r10b,0(<addr=%rax) movb % r10b, 0( % rax) # qhasm: input_1 -= 340 # asm 1: sub $340,<input_1=int64#2 # asm 2: sub $340,<input_1=%rsi sub $340, % rsi # qhasm: =? row-0 # asm 1: cmp $0,<row=int64#5 # asm 2: cmp $0,<row=%r8 cmp $0, % r8 # comment:fp stack unchanged by jump # qhasm: goto loop if != jne ._loop # qhasm: ss = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 0(<input_0=%rdi),>ss=%ymm0 vmovupd 0( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 0(<input_2=%rdx),>ee=%ymm1 vmovupd 0( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 0 ] = ss # asm 1: vmovupd <ss=reg256#1,0(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,0(<input_0=%rdi) vmovupd % ymm0, 0( % rdi) # qhasm: ss = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 32(<input_0=%rdi),>ss=%ymm0 vmovupd 32( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 32(<input_2=%rdx),>ee=%ymm1 vmovupd 32( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 32 ] = ss # asm 1: vmovupd <ss=reg256#1,32(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,32(<input_0=%rdi) vmovupd % ymm0, 32( % rdi) # qhasm: ss = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 64(<input_0=%rdi),>ss=%ymm0 vmovupd 64( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 64(<input_2=%rdx),>ee=%ymm1 vmovupd 64( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 64 ] = ss # asm 1: vmovupd <ss=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: return add % r11, % rsp ret
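The record above is the mceliece348864f syndrome kernel: for each of the 768 parity-check rows it ANDs a 340-byte public-key row (320 bytes via vpand on 256-bit lanes plus an 8-, 8- and 4-byte scalar tail) against the error vector starting 96 bytes in, folds the result to one parity bit with popcnt, and shifts that bit into the syndrome byte; the identity block (the first 96 bytes of e) is XORed in after the loop. A plain-C model under those assumptions; the names, the forward row order (the asm walks rows backward from offset 260780 = 340 * 767), and the row-major pk layout are mine, and it ignores the asm's AVX2 scheduling:

#include <stdint.h>
#include <string.h>

#define PK_NROWS 768       /* parity rows for mceliece348864 */
#define ROW_BYTES 340      /* (3488 - 768) / 8 bytes per pk row */

static int parity(unsigned x) { return __builtin_popcount(x) & 1; }

/* s = H*e over GF(2): a scalar model of what syndrome_asm computes.
   pk holds PK_NROWS rows of ROW_BYTES bytes; e is the 436-byte error vector. */
static void syndrome(uint8_t *s, const uint8_t *pk, const uint8_t *e) {
    memset(s, 0, PK_NROWS / 8);
    for (int row = 0; row < PK_NROWS; row++) {
        unsigned acc = 0;
        for (int j = 0; j < ROW_BYTES; j++)      /* vpand/vpxor lanes in the asm */
            acc ^= (unsigned)(pk[row * ROW_BYTES + j] & e[PK_NROWS / 8 + j]);
        s[row / 8] |= (uint8_t)(parity(acc) << (row % 8));
    }
    for (int j = 0; j < PK_NROWS / 8; j++)       /* fold in the identity block */
        s[j] ^= e[j];
}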
mktmansour/MKT-KSA-Geolocation-Security
56,484
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864f/avx2/vec256_mul_asm.S
#include "namespace.h" #define vec256_mul_asm CRYPTO_NAMESPACE(vec256_mul_asm) #define _vec256_mul_asm _CRYPTO_NAMESPACE(vec256_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r # qhasm: enter vec256_mul_asm .p2align 5 .global _vec256_mul_asm .global vec256_mul_asm _vec256_mul_asm: vec256_mul_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#2 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm1 vmovupd 352( % rsi), % ymm1 # qhasm: r11 = a11 & b0 # asm 1: vpand <a11=reg256#2,<b0=reg256#1,>r11=reg256#3 # asm 2: vpand <a11=%ymm1,<b0=%ymm0,>r11=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r12 = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#2,>r12=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm1,>r12=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r13 = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#2,>r13=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm1,>r13=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r14 = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#2,>r14=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm1,>r14=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r15 = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#2,>r15=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm1,>r15=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r16 = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#2,>r16=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm1,>r16=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r17 = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#2,>r17=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm1,>r17=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r18 = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#2,>r18=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm1,>r18=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r19 = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#2,>r19=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm1,>r19=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r20 = a11 & mem256[input_2 + 288] # asm 1: vpand 
288(<input_2=int64#3),<a11=reg256#2,>r20=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm1,>r20=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r21 = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#2,>r21=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm1,>r21=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r22 = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#2,>r22=reg256#2 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm1,>r22=%ymm1 vpand 352( % rdx), % ymm1, % ymm1 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#2,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r22=%ymm1,<r13=%ymm4,<r13=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r10 = r22 # asm 1: vmovapd <r22=reg256#2,>r10=reg256#2 # asm 2: vmovapd <r22=%ymm1,>r10=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#14 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm13 vmovupd 320( % rsi), % ymm13 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a10=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 224( 
% rdx), % ymm13, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9 # asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#10,<r18=reg256#10 # asm 2: vpxor <r=%ymm14,<r18=%ymm9,<r18=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#11,<r19=reg256#11 # asm 2: vpxor <r=%ymm14,<r19=%ymm10,<r19=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#12,<r20=reg256#12 # asm 2: vpxor <r=%ymm14,<r20=%ymm11,<r20=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#14,<r21=reg256#13,<r21=reg256#13 # asm 2: vpxor <r=%ymm13,<r21=%ymm12,<r21=%ymm12 vpxor % ymm13, % ymm12, % ymm12 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#13,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r21=%ymm12,<r12=%ymm3,<r12=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r9 = r21 # asm 1: vmovapd <r21=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r21=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#14 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm13 vmovupd 288( % rsi), % ymm13 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a9=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 128( % rdx), 
% ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9 # asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#10,<r18=reg256#10 # asm 2: vpxor <r=%ymm14,<r18=%ymm9,<r18=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#11,<r19=reg256#11 # asm 2: vpxor <r=%ymm14,<r19=%ymm10,<r19=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#14,<r20=reg256#12,<r20=reg256#12 # asm 2: vpxor <r=%ymm13,<r20=%ymm11,<r20=%ymm11 vpxor % ymm13, % ymm11, % ymm11 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#12,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r20=%ymm11,<r11=%ymm2,<r11=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r8 = r20 # asm 1: vmovapd <r20=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r20=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#14 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm13 vmovupd 256( % rsi), % ymm13 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a8=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # 
qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9 # asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#10,<r18=reg256#10 # asm 2: vpxor <r=%ymm14,<r18=%ymm9,<r18=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a8=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#14,<r19=reg256#11,<r19=reg256#11 # asm 2: vpxor 
<r=%ymm13,<r19=%ymm10,<r19=%ymm10 vpxor % ymm13, % ymm10, % ymm10 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#11,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r19=%ymm10,<r10=%ymm1,<r10=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r7 = r19 # asm 1: vmovapd <r19=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r19=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#14 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm13 vmovupd 224( % rsi), % ymm13 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a7=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 
vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9 # asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#14,<r18=reg256#10,<r18=reg256#10 # asm 2: vpxor <r=%ymm13,<r18=%ymm9,<r18=%ymm9 vpxor % ymm13, % ymm9, % ymm9 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r6 = r18 # asm 1: vmovapd <r18=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r18=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#14 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm13 vmovupd 192( % rsi), % ymm13 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a6=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = 
a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#14,<r17=reg256#9,<r17=reg256#9 # asm 2: vpxor <r=%ymm13,<r17=%ymm8,<r17=%ymm8 vpxor % ymm13, % ymm8, % ymm8 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r5 = r17 # asm 1: vmovapd <r17=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r17=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#14 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm13 vmovupd 160( % rsi), % ymm13 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a5=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#14,<r16=reg256#8,<r16=reg256#8 # asm 2: vpxor <r=%ymm13,<r16=%ymm7,<r16=%ymm7 vpxor % ymm13, % ymm7, % ymm7 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r4 = r16 # asm 1: vmovapd <r16=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r16=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#14 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm13 vmovupd 128( % rsi), % ymm13 # qhasm: r = a4 & b0 # asm 1: vpand 
<a4=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a4=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 
# qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#14,<r15=reg256#7,<r15=reg256#7 # asm 2: vpxor <r=%ymm13,<r15=%ymm6,<r15=%ymm6 vpxor % ymm13, % ymm6, % ymm6 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r3 = r15 # asm 1: vmovapd <r15=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r15=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#14 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm13 vmovupd 96( % rsi), % ymm13 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a3=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#15,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm14,<r3=%ymm6,<r3=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor 
<r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#14,<r14=reg256#6,<r14=reg256#6 # asm 2: vpxor <r=%ymm13,<r14=%ymm5,<r14=%ymm5 vpxor % ymm13, % ymm5, % ymm5 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r2 = r14 # asm 1: vmovapd <r14=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r14=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#14 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm13 vmovupd 64( % rsi), % ymm13 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a2=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#15,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm14,<r2=%ymm5,<r2=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#15,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm14,<r3=%ymm6,<r3=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor 
<r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#14,<r13=reg256#5,<r13=reg256#5 # asm 2: vpxor <r=%ymm13,<r13=%ymm4,<r13=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r1 = r13 # asm 1: vmovapd <r13=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r13=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#14 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm13 vmovupd 32( % rsi), % ymm13 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#14,<b0=reg256#1,>r=reg256#15 # asm 2: vpand <a1=%ymm13,<b0=%ymm0,>r=%ymm14 vpand % ymm13, % ymm0, % ymm14 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#15,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm14,<r1=%ymm4,<r1=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 32( % rdx), % ymm13, % ymm14 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#15,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm14,<r2=%ymm5,<r2=%ymm5 vpxor % ymm14, % ymm5, % 
ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 64( % rdx), % ymm13, % ymm14 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#15,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm14,<r3=%ymm6,<r3=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 96( % rdx), % ymm13, % ymm14 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 128( % rdx), % ymm13, % ymm14 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 160( % rdx), % ymm13, % ymm14 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 192( % rdx), % ymm13, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 224( % rdx), % ymm13, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 256( % rdx), % ymm13, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 288( % rdx), % ymm13, % ymm14 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1 vpxor % ymm14, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#14,>r=reg256#15 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm13,>r=%ymm14 vpand 320( % rdx), % ymm13, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2 vpxor % ymm14, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#14,>r=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm13,>r=%ymm13 vpand 352( % rdx), % ymm13, % ymm13 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#14,<r12=reg256#4,<r12=reg256#4 # asm 2: vpxor <r=%ymm13,<r12=%ymm3,<r12=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r3 ^= r12 # asm 1: vpxor <r12=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor 
<r12=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r0 = r12 # asm 1: vmovapd <r12=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r12=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#14 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm13 vmovupd 0( % rsi), % ymm13 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#14,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm13,<b0=%ymm0,>r=%ymm0 vpand % ymm13, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 32( % rdx), % ymm13, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 64( % rdx), % ymm13, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 96( % rdx), % ymm13, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 128( % rdx), % ymm13, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 160( % rdx), % ymm13, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 192( % rdx), % ymm13, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 224( % rdx), % ymm13, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 256( % rdx), % ymm13, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 288( % rdx), % ymm13, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor 
<r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 320( % rdx), % ymm13, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#2,<r10=reg256#2 # asm 2: vpxor <r=%ymm0,<r10=%ymm1,<r10=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#14,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm13,>r=%ymm0 vpand 352( % rdx), % ymm13, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#3,<r11=reg256#3 # asm 2: vpxor <r=%ymm0,<r11=%ymm2,<r11=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#3,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm2,352(<input_0=%rdi) vmovupd % ymm2, 352( % rdi) # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#2,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm1,320(<input_0=%rdi) vmovupd % ymm1, 320( % rdi) # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#13,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm12,288(<input_0=%rdi) vmovupd % ymm12, 288( % rdi) # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#12,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm11,256(<input_0=%rdi) vmovupd % ymm11, 256( % rdi) # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#11,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm10,224(<input_0=%rdi) vmovupd % ymm10, 224( % rdi) # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#10,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm9,192(<input_0=%rdi) vmovupd % ymm9, 192( % rdi) # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#9,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm8,160(<input_0=%rdi) vmovupd % ymm8, 160( % rdi) # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#8,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm7,128(<input_0=%rdi) vmovupd % ymm7, 128( % rdi) # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#7,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm6,96(<input_0=%rdi) vmovupd % ymm6, 96( % rdi) # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#6,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm5,64(<input_0=%rdi) vmovupd % ymm5, 64( % rdi) # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#5,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm4,32(<input_0=%rdi) vmovupd % ymm4, 32( % rdi) # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#4,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm3,0(<input_0=%rdi) vmovupd % ymm3, 0( % rdi) # qhasm: return add % r11, % rsp ret
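# Note: the routine above is the tail of a bitsliced GF(2^12) multiplication:
# each coefficient slice a_i of one operand is ANDed with the twelve
# coefficient slices of the other and XOR-accumulated into the product
# slices, after which the high-degree terms are folded back with the field
# reduction x^12 = x^3 + 1 (visible above as r15 -> r6,r3; r14 -> r5,r2;
# r13 -> r4,r1; r12 -> r3,r0). A minimal scalar C sketch of the same
# schoolbook multiply-and-reduce, using one uint64_t lane per slice in place
# of a 256-bit ymm register; the name vec_mul_sketch is illustrative, not
# taken from this codebase:
#
#   #include <stdint.h>
#
#   /* in0[i] and in1[i] hold bit i of many GF(2^12) elements in parallel;
#      out[i] receives bit i of the corresponding products. */
#   void vec_mul_sketch(uint64_t out[12],
#                       const uint64_t in0[12], const uint64_t in1[12])
#   {
#       uint64_t buf[23] = {0};                 /* degrees 0..22 */
#       for (int i = 0; i < 12; i++)            /* carry-less schoolbook */
#           for (int j = 0; j < 12; j++)
#               buf[i + j] ^= in0[i] & in1[j];
#       for (int i = 22; i >= 12; i--) {        /* fold: x^12 = x^3 + 1 */
#           buf[i - 12 + 3] ^= buf[i];
#           buf[i - 12]     ^= buf[i];
#       }
#       for (int i = 0; i < 12; i++)
#           out[i] = buf[i];
#   }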
mktmansour/MKT-KSA-Geolocation-Security
264233
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece348864f/avx2/transpose_64x256_sp_asm.S
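# The routine below transposes the 64x64-bit blocks of a 64x256 bit matrix
# in place, as a log-step butterfly network: at each stage, pairs of rows
# exchange 32-, 16- and 8-bit sub-blocks (and 4-, 2-, 1-bit sub-blocks in
# later stages) through mask/shift/or cascades, with the MASKn_0/MASKn_1
# constants selecting the low/high half of each block. A minimal one-stage
# C sketch of that butterfly on 64-bit words; the helper name and signature
# are illustrative assumptions, and the extra masking of the shifted terms
# is implicit in the asm because vpsllq/vpslld/vpsllw shift within lanes
# and cannot cross block boundaries:
#
#   #include <stdint.h>
#
#   /* Exchange the high s-bit half of each 2s-bit block of *a with the
#      low half of the corresponding block of *b; mask selects the low
#      half of every block (e.g. 0x00000000FFFFFFFF for s = 32). */
#   static void butterfly(uint64_t *a, uint64_t *b, int s, uint64_t mask)
#   {
#       uint64_t lo = (*a & mask) | ((*b << s) & ~mask);
#       uint64_t hi = ((*a >> s) & mask) | (*b & ~mask);
#       *a = lo;
#       *b = hi;
#   }
#
# Applying the butterfly across row pairs at block distance 32, then 16, 8,
# and so on down to 1 realizes the transpose; each vpand/vpsllq/vpsrlq/vpor
# group below is exactly one such exchange on 256-bit slices.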
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x256_sp_asm CRYPTO_NAMESPACE(transpose_64x256_sp_asm) #define _transpose_64x256_sp_asm _CRYPTO_NAMESPACE(transpose_64x256_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 x0 # qhasm: reg256 x1 # qhasm: reg256 x2 # qhasm: reg256 x3 # qhasm: reg256 x4 # qhasm: reg256 x5 # qhasm: reg256 x6 # qhasm: reg256 x7 # qhasm: reg256 t0 # qhasm: reg256 t1 # qhasm: reg256 v00 # qhasm: reg256 v01 # qhasm: reg256 v10 # qhasm: reg256 v11 # qhasm: reg256 mask0 # qhasm: reg256 mask1 # qhasm: reg256 mask2 # qhasm: reg256 mask3 # qhasm: reg256 mask4 # qhasm: reg256 mask5 # qhasm: enter transpose_64x256_sp_asm .p2align 5 .global _transpose_64x256_sp_asm .global transpose_64x256_sp_asm _transpose_64x256_sp_asm: transpose_64x256_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem256[ MASK5_0 ] # asm 1: vmovapd MASK5_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK5_0(%rip),>mask0=%ymm0 vmovapd MASK5_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK5_1 ] # asm 1: vmovapd MASK5_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK5_1(%rip),>mask1=%ymm1 vmovapd MASK5_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK4_0 ] # asm 1: vmovapd MASK4_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK4_0(%rip),>mask2=%ymm2 vmovapd MASK4_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK4_1 ] # asm 1: vmovapd MASK4_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK4_1(%rip),>mask3=%ymm3 vmovapd MASK4_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK3_0 ] # asm 1: vmovapd MASK3_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK3_0(%rip),>mask4=%ymm4 vmovapd MASK3_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK3_1 ] # asm 1: vmovapd MASK3_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK3_1(%rip),>mask5=%ymm5 vmovapd MASK3_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 256(<input_0=%rdi),>x1=%ymm7 vmovupd 256( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 512 ] 
# asm 1: vmovupd 512(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 512(<input_0=%rdi),>x2=%ymm8 vmovupd 512( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 768 ] # asm 1: vmovupd 768(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 768(<input_0=%rdi),>x3=%ymm9 vmovupd 768( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1024(<input_0=%rdi),>x4=%ymm10 vmovupd 1024( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1280 ] # asm 1: vmovupd 1280(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1280(<input_0=%rdi),>x5=%ymm11 vmovupd 1280( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1536(<input_0=%rdi),>x6=%ymm12 vmovupd 1536( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1792(<input_0=%rdi),>x7=%ymm13 vmovupd 1792( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 
# qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 256 ] = x1 # asm 1: vmovupd <x1=reg256#14,256(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,256(<input_0=%rdi) vmovupd % ymm13, 256( % rdi) # qhasm: mem256[ input_0 + 512 ] = x2 # asm 1: vmovupd <x2=reg256#15,512(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,512(<input_0=%rdi) vmovupd % ymm14, 512( % rdi) # qhasm: mem256[ input_0 + 768 ] = x3 # asm 1: vmovupd <x3=reg256#11,768(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,768(<input_0=%rdi) vmovupd % ymm10, 768( % rdi) # qhasm: mem256[ input_0 + 1024 ] = x4 # asm 1: vmovupd <x4=reg256#12,1024(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1024(<input_0=%rdi) vmovupd % ymm11, 1024( % rdi) # qhasm: mem256[ input_0 + 1280 ] = x5 # asm 1: vmovupd <x5=reg256#9,1280(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1280(<input_0=%rdi) vmovupd % ymm8, 1280( % rdi) # qhasm: mem256[ input_0 + 1536 ] = x6 # asm 1: vmovupd <x6=reg256#13,1536(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1536(<input_0=%rdi) vmovupd % ymm12, 1536( % rdi) # qhasm: mem256[ input_0 + 1792 ] = x7 # asm 1: vmovupd <x7=reg256#7,1792(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1792(<input_0=%rdi) vmovupd % ymm6, 1792( % rdi) # qhasm: x0 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6 vmovupd 32( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 544 ] # asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8 vmovupd 544( % rdi), % ymm8 # qhasm: x3 = mem256[ 
input_0 + 800 ] # asm 1: vmovupd 800(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 800(<input_0=%rdi),>x3=%ymm9 vmovupd 800( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1056 ] # asm 1: vmovupd 1056(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1056(<input_0=%rdi),>x4=%ymm10 vmovupd 1056( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1312 ] # asm 1: vmovupd 1312(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1312(<input_0=%rdi),>x5=%ymm11 vmovupd 1312( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1568(<input_0=%rdi),>x6=%ymm12 vmovupd 1568( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1824(<input_0=%rdi),>x7=%ymm13 vmovupd 1824( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: 
vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 
2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x 
v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 32 ] = x0 # asm 1: vmovupd <x0=reg256#10,32(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,32(<input_0=%rdi) vmovupd % ymm9, 32( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 544 ] = x2 # asm 1: vmovupd <x2=reg256#15,544(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,544(<input_0=%rdi) vmovupd % ymm14, 544( % rdi) # qhasm: mem256[ input_0 + 800 ] = x3 # asm 1: vmovupd <x3=reg256#11,800(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,800(<input_0=%rdi) vmovupd % ymm10, 800( % rdi) # qhasm: mem256[ input_0 + 1056 ] = x4 # asm 1: vmovupd <x4=reg256#12,1056(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1056(<input_0=%rdi) vmovupd % ymm11, 1056( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x5 # asm 1: vmovupd <x5=reg256#9,1312(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1312(<input_0=%rdi) vmovupd % ymm8, 1312( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x6 # asm 1: vmovupd <x6=reg256#13,1568(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1568(<input_0=%rdi) vmovupd % ymm12, 1568( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x7 # asm 1: vmovupd <x7=reg256#7,1824(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1824(<input_0=%rdi) vmovupd % ymm6, 1824( % rdi) # qhasm: x0 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 64(<input_0=%rdi),>x0=%ymm6 vmovupd 64( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 320(<input_0=%rdi),>x1=%ymm7 vmovupd 320( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 832 ] # asm 1: vmovupd 832(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 832(<input_0=%rdi),>x3=%ymm9 vmovupd 832( % rdi), % ymm9 # qhasm: 
x4 = mem256[ input_0 + 1088 ] # asm 1: vmovupd 1088(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1088(<input_0=%rdi),>x4=%ymm10 vmovupd 1088( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1344 ] # asm 1: vmovupd 1344(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1344(<input_0=%rdi),>x5=%ymm11 vmovupd 1344( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1600(<input_0=%rdi),>x6=%ymm12 vmovupd 1600( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1856(<input_0=%rdi),>x7=%ymm13 vmovupd 1856( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 
1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = 
x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 64 ] = x0 # asm 1: vmovupd <x0=reg256#10,64(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,64(<input_0=%rdi) vmovupd % ymm9, 64( % rdi) # qhasm: mem256[ input_0 + 320 ] = x1 # asm 1: vmovupd <x1=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 576 ] = x2 # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi) vmovupd % ymm14, 576( % rdi) # qhasm: mem256[ input_0 + 832 ] = x3 # asm 1: vmovupd <x3=reg256#11,832(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,832(<input_0=%rdi) vmovupd % ymm10, 832( % rdi) # qhasm: mem256[ input_0 + 1088 ] = x4 # asm 1: vmovupd <x4=reg256#12,1088(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1088(<input_0=%rdi) vmovupd % ymm11, 1088( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x5 # asm 1: vmovupd <x5=reg256#9,1344(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1344(<input_0=%rdi) vmovupd % ymm8, 1344( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x6 # asm 1: vmovupd <x6=reg256#13,1600(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1600(<input_0=%rdi) vmovupd % ymm12, 1600( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x7 # asm 1: vmovupd <x7=reg256#7,1856(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1856(<input_0=%rdi) vmovupd % ymm6, 1856( % rdi) # qhasm: x0 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6 vmovupd 96( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7 vmovupd 352( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8 vmovupd 608( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 864 ] # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9 vmovupd 864( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1120 ] # asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10 vmovupd 1120( % rdi), % 
vmovupd 1376(%rdi), %ymm11
vmovupd 1632(%rdi), %ymm12
vmovupd 1888(%rdi), %ymm13
# layer 1, pairs (x0,x4) (x1,x5) (x2,x6) (x3,x7):
#   x_i = (x_i & mask0) | (x_{i+4} << 32) ; x_{i+4} = (x_i >> 32) | (x_{i+4} & mask1)
vpand %ymm6, %ymm0, %ymm14
vpsllq $32, %ymm10, %ymm15
vpsrlq $32, %ymm6, %ymm6
vpand %ymm10, %ymm1, %ymm10
vpor %ymm14, %ymm15, %ymm14
vpor %ymm6, %ymm10, %ymm6
vpand %ymm7, %ymm0, %ymm10
vpsllq $32, %ymm11, %ymm15
vpsrlq $32, %ymm7, %ymm7
vpand %ymm11, %ymm1, %ymm11
vpor %ymm10, %ymm15, %ymm10
vpor %ymm7, %ymm11, %ymm7
vpand %ymm8, %ymm0, %ymm11
vpsllq $32, %ymm12, %ymm15
vpsrlq $32, %ymm8, %ymm8
vpand %ymm12, %ymm1, %ymm12
vpor %ymm11, %ymm15, %ymm11
vpor %ymm8, %ymm12, %ymm8
vpand %ymm9, %ymm0, %ymm12
vpsllq $32, %ymm13, %ymm15
vpsrlq $32, %ymm9, %ymm9
vpand %ymm13, %ymm1, %ymm13
vpor %ymm12, %ymm15, %ymm12
vpor %ymm9, %ymm13, %ymm9
# layer 2, pairs (x0,x2) (x1,x3) (x4,x6) (x5,x7), mask2/mask3, 16-bit shifts
vpand %ymm14, %ymm2, %ymm13
vpslld $16, %ymm11, %ymm15
vpsrld $16, %ymm14, %ymm14
vpand %ymm11, %ymm3, %ymm11
vpor %ymm13, %ymm15, %ymm13
vpor %ymm14, %ymm11, %ymm11
vpand %ymm10, %ymm2, %ymm14
vpslld $16, %ymm12, %ymm15
vpsrld $16, %ymm10, %ymm10
vpand %ymm12, %ymm3, %ymm12
vpor %ymm14, %ymm15, %ymm14
vpor %ymm10, %ymm12, %ymm10
vpand %ymm6, %ymm2, %ymm12
vpslld $16, %ymm8, %ymm15
vpsrld $16, %ymm6, %ymm6
vpand %ymm8, %ymm3, %ymm8
vpor %ymm12, %ymm15, %ymm12
vpor %ymm6, %ymm8, %ymm6
vpand %ymm7, %ymm2, %ymm8
vpslld $16, %ymm9, %ymm15
vpsrld $16, %ymm7, %ymm7
vpand %ymm9, %ymm3, %ymm9
vpor %ymm8, %ymm15, %ymm8
vpor %ymm7, %ymm9, %ymm7
# layer 3, pairs (x0,x1) (x2,x3) (x4,x5) (x6,x7), mask4/mask5, 8-bit shifts
vpand %ymm13, %ymm4, %ymm9
vpsllw $8, %ymm14, %ymm15
vpsrlw $8, %ymm13, %ymm13
vpand %ymm14, %ymm5, %ymm14
vpor %ymm9, %ymm15, %ymm9
vpor %ymm13, %ymm14, %ymm13
vpand %ymm11, %ymm4, %ymm14
vpsllw $8, %ymm10, %ymm15
vpsrlw $8, %ymm11, %ymm11
vpand %ymm10, %ymm5, %ymm10
vpor %ymm14, %ymm15, %ymm14
vpor %ymm11, %ymm10, %ymm10
vpand %ymm12, %ymm4, %ymm11
vpsllw $8, %ymm8, %ymm15
vpsrlw $8, %ymm12, %ymm12
vpand %ymm8, %ymm5, %ymm8
vpor %ymm11, %ymm15, %ymm11
vpor %ymm12, %ymm8, %ymm8
vpand %ymm6, %ymm4, %ymm12
vpsllw $8, %ymm7, %ymm15
vpsrlw $8, %ymm6, %ymm6
vpand %ymm7, %ymm5, %ymm7
vpor %ymm12, %ymm15, %ymm12
vpor %ymm6, %ymm7, %ymm6
# store the block at byte offset 96
vmovupd %ymm9, 96(%rdi)
vmovupd %ymm13, 352(%rdi)
vmovupd %ymm14, 608(%rdi)
vmovupd %ymm10, 864(%rdi)
vmovupd %ymm11, 1120(%rdi)
vmovupd %ymm8, 1376(%rdi)
vmovupd %ymm12, 1632(%rdi)
vmovupd %ymm6, 1888(%rdi)
# load x0..x4 of the column block at byte offset 128
vmovupd 128(%rdi), %ymm6
vmovupd 384(%rdi), %ymm7
vmovupd 640(%rdi), %ymm8
vmovupd 896(%rdi), %ymm9
vmovupd 1152(%rdi), %ymm10
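# Same three-layer exchange again for the column block at byte offset 128;
# its first five row loads were issued just above.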
vmovupd 1408(%rdi), %ymm11
vmovupd 1664(%rdi), %ymm12
vmovupd 1920(%rdi), %ymm13
# layer 1 (mask0/mask1, 32-bit shifts)
vpand %ymm6, %ymm0, %ymm14
vpsllq $32, %ymm10, %ymm15
vpsrlq $32, %ymm6, %ymm6
vpand %ymm10, %ymm1, %ymm10
vpor %ymm14, %ymm15, %ymm14
vpor %ymm6, %ymm10, %ymm6
vpand %ymm7, %ymm0, %ymm10
vpsllq $32, %ymm11, %ymm15
vpsrlq $32, %ymm7, %ymm7
vpand %ymm11, %ymm1, %ymm11
vpor %ymm10, %ymm15, %ymm10
vpor %ymm7, %ymm11, %ymm7
vpand %ymm8, %ymm0, %ymm11
vpsllq $32, %ymm12, %ymm15
vpsrlq $32, %ymm8, %ymm8
vpand %ymm12, %ymm1, %ymm12
vpor %ymm11, %ymm15, %ymm11
vpor %ymm8, %ymm12, %ymm8
vpand %ymm9, %ymm0, %ymm12
vpsllq $32, %ymm13, %ymm15
vpsrlq $32, %ymm9, %ymm9
vpand %ymm13, %ymm1, %ymm13
vpor %ymm12, %ymm15, %ymm12
vpor %ymm9, %ymm13, %ymm9
# layer 2 (mask2/mask3, 16-bit shifts)
vpand %ymm14, %ymm2, %ymm13
vpslld $16, %ymm11, %ymm15
vpsrld $16, %ymm14, %ymm14
vpand %ymm11, %ymm3, %ymm11
vpor %ymm13, %ymm15, %ymm13
vpor %ymm14, %ymm11, %ymm11
vpand %ymm10, %ymm2, %ymm14
vpslld $16, %ymm12, %ymm15
vpsrld $16, %ymm10, %ymm10
vpand %ymm12, %ymm3, %ymm12
vpor %ymm14, %ymm15, %ymm14
vpor %ymm10, %ymm12, %ymm10
vpand %ymm6, %ymm2, %ymm12
vpslld $16, %ymm8, %ymm15
vpsrld $16, %ymm6, %ymm6
vpand %ymm8, %ymm3, %ymm8
vpor %ymm12, %ymm15, %ymm12
vpor %ymm6, %ymm8, %ymm6
vpand %ymm7, %ymm2, %ymm8
vpslld $16, %ymm9, %ymm15
vpsrld $16, %ymm7, %ymm7
vpand %ymm9, %ymm3, %ymm9
vpor %ymm8, %ymm15, %ymm8
vpor %ymm7, %ymm9, %ymm7
# layer 3 (mask4/mask5, 8-bit shifts)
vpand %ymm13, %ymm4, %ymm9
vpsllw $8, %ymm14, %ymm15
vpsrlw $8, %ymm13, %ymm13
vpand %ymm14, %ymm5, %ymm14
vpor %ymm9, %ymm15, %ymm9
vpor %ymm13, %ymm14, %ymm13
vpand %ymm11, %ymm4, %ymm14
vpsllw $8, %ymm10, %ymm15
vpsrlw $8, %ymm11, %ymm11
vpand %ymm10, %ymm5, %ymm10
vpor %ymm14, %ymm15, %ymm14
vpor %ymm11, %ymm10, %ymm10
vpand %ymm12, %ymm4, %ymm11
vpsllw $8, %ymm8, %ymm15
vpsrlw $8, %ymm12, %ymm12
vpand %ymm8, %ymm5, %ymm8
vpor %ymm11, %ymm15, %ymm11
vpor %ymm12, %ymm8, %ymm8
vpand %ymm6, %ymm4, %ymm12
vpsllw $8, %ymm7, %ymm15
vpsrlw $8, %ymm6, %ymm6
vpand %ymm7, %ymm5, %ymm7
vpor %ymm12, %ymm15, %ymm12
vpor %ymm6, %ymm7, %ymm6
# store the block at byte offset 128
vmovupd %ymm9, 128(%rdi)
vmovupd %ymm13, 384(%rdi)
vmovupd %ymm14, 640(%rdi)
vmovupd %ymm10, 896(%rdi)
vmovupd %ymm11, 1152(%rdi)
vmovupd %ymm8, 1408(%rdi)
vmovupd %ymm12, 1664(%rdi)
vmovupd %ymm6, 1920(%rdi)
# load x0..x5 of the column block at byte offset 160
vmovupd 160(%rdi), %ymm6
vmovupd 416(%rdi), %ymm7
vmovupd 672(%rdi), %ymm8
vmovupd 928(%rdi), %ymm9
vmovupd 1184(%rdi), %ymm10
vmovupd 1440(%rdi), %ymm11
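# Same exchange for the column block at byte offset 160; its first six row
# loads were issued just above.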
vmovupd 1696(%rdi), %ymm12
vmovupd 1952(%rdi), %ymm13
# layer 1 (mask0/mask1, 32-bit shifts)
vpand %ymm6, %ymm0, %ymm14
vpsllq $32, %ymm10, %ymm15
vpsrlq $32, %ymm6, %ymm6
vpand %ymm10, %ymm1, %ymm10
vpor %ymm14, %ymm15, %ymm14
vpor %ymm6, %ymm10, %ymm6
vpand %ymm7, %ymm0, %ymm10
vpsllq $32, %ymm11, %ymm15
vpsrlq $32, %ymm7, %ymm7
vpand %ymm11, %ymm1, %ymm11
vpor %ymm10, %ymm15, %ymm10
vpor %ymm7, %ymm11, %ymm7
vpand %ymm8, %ymm0, %ymm11
vpsllq $32, %ymm12, %ymm15
vpsrlq $32, %ymm8, %ymm8
vpand %ymm12, %ymm1, %ymm12
vpor %ymm11, %ymm15, %ymm11
vpor %ymm8, %ymm12, %ymm8
vpand %ymm9, %ymm0, %ymm12
vpsllq $32, %ymm13, %ymm15
vpsrlq $32, %ymm9, %ymm9
vpand %ymm13, %ymm1, %ymm13
vpor %ymm12, %ymm15, %ymm12
vpor %ymm9, %ymm13, %ymm9
# layer 2 (mask2/mask3, 16-bit shifts)
vpand %ymm14, %ymm2, %ymm13
vpslld $16, %ymm11, %ymm15
vpsrld $16, %ymm14, %ymm14
vpand %ymm11, %ymm3, %ymm11
vpor %ymm13, %ymm15, %ymm13
vpor %ymm14, %ymm11, %ymm11
vpand %ymm10, %ymm2, %ymm14
vpslld $16, %ymm12, %ymm15
vpsrld $16, %ymm10, %ymm10
vpand %ymm12, %ymm3, %ymm12
vpor %ymm14, %ymm15, %ymm14
vpor %ymm10, %ymm12, %ymm10
vpand %ymm6, %ymm2, %ymm12
vpslld $16, %ymm8, %ymm15
vpsrld $16, %ymm6, %ymm6
vpand %ymm8, %ymm3, %ymm8
vpor %ymm12, %ymm15, %ymm12
vpor %ymm6, %ymm8, %ymm6
vpand %ymm7, %ymm2, %ymm8
vpslld $16, %ymm9, %ymm15
vpsrld $16, %ymm7, %ymm7
vpand %ymm9, %ymm3, %ymm9
vpor %ymm8, %ymm15, %ymm8
vpor %ymm7, %ymm9, %ymm7
# layer 3 (mask4/mask5, 8-bit shifts)
vpand %ymm13, %ymm4, %ymm9
vpsllw $8, %ymm14, %ymm15
vpsrlw $8, %ymm13, %ymm13
vpand %ymm14, %ymm5, %ymm14
vpor %ymm9, %ymm15, %ymm9
vpor %ymm13, %ymm14, %ymm13
vpand %ymm11, %ymm4, %ymm14
vpsllw $8, %ymm10, %ymm15
vpsrlw $8, %ymm11, %ymm11
vpand %ymm10, %ymm5, %ymm10
vpor %ymm14, %ymm15, %ymm14
vpor %ymm11, %ymm10, %ymm10
vpand %ymm12, %ymm4, %ymm11
vpsllw $8, %ymm8, %ymm15
vpsrlw $8, %ymm12, %ymm12
vpand %ymm8, %ymm5, %ymm8
vpor %ymm11, %ymm15, %ymm11
vpor %ymm12, %ymm8, %ymm8
vpand %ymm6, %ymm4, %ymm12
vpsllw $8, %ymm7, %ymm15
vpsrlw $8, %ymm6, %ymm6
vpand %ymm7, %ymm5, %ymm7
vpor %ymm12, %ymm15, %ymm12
vpor %ymm6, %ymm7, %ymm6
# store the block at byte offset 160
vmovupd %ymm9, 160(%rdi)
vmovupd %ymm13, 416(%rdi)
vmovupd %ymm14, 672(%rdi)
vmovupd %ymm10, 928(%rdi)
vmovupd %ymm11, 1184(%rdi)
vmovupd %ymm8, 1440(%rdi)
vmovupd %ymm12, 1696(%rdi)
vmovupd %ymm6, 1952(%rdi)
# load x0..x6 of the column block at byte offset 192
vmovupd 192(%rdi), %ymm6
vmovupd 448(%rdi), %ymm7
vmovupd 704(%rdi), %ymm8
vmovupd 960(%rdi), %ymm9
vmovupd 1216(%rdi), %ymm10
vmovupd 1472(%rdi), %ymm11
vmovupd 1728(%rdi), %ymm12
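# Same exchange for the column block at byte offset 192; its first seven row
# loads were issued just above.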
vmovupd 1984(%rdi), %ymm13
# layer 1 (mask0/mask1, 32-bit shifts)
vpand %ymm6, %ymm0, %ymm14
vpsllq $32, %ymm10, %ymm15
vpsrlq $32, %ymm6, %ymm6
vpand %ymm10, %ymm1, %ymm10
vpor %ymm14, %ymm15, %ymm14
vpor %ymm6, %ymm10, %ymm6
vpand %ymm7, %ymm0, %ymm10
vpsllq $32, %ymm11, %ymm15
vpsrlq $32, %ymm7, %ymm7
vpand %ymm11, %ymm1, %ymm11
vpor %ymm10, %ymm15, %ymm10
vpor %ymm7, %ymm11, %ymm7
vpand %ymm8, %ymm0, %ymm11
vpsllq $32, %ymm12, %ymm15
vpsrlq $32, %ymm8, %ymm8
vpand %ymm12, %ymm1, %ymm12
vpor %ymm11, %ymm15, %ymm11
vpor %ymm8, %ymm12, %ymm8
vpand %ymm9, %ymm0, %ymm12
vpsllq $32, %ymm13, %ymm15
vpsrlq $32, %ymm9, %ymm9
vpand %ymm13, %ymm1, %ymm13
vpor %ymm12, %ymm15, %ymm12
vpor %ymm9, %ymm13, %ymm9
# layer 2 (mask2/mask3, 16-bit shifts)
vpand %ymm14, %ymm2, %ymm13
vpslld $16, %ymm11, %ymm15
vpsrld $16, %ymm14, %ymm14
vpand %ymm11, %ymm3, %ymm11
vpor %ymm13, %ymm15, %ymm13
vpor %ymm14, %ymm11, %ymm11
vpand %ymm10, %ymm2, %ymm14
vpslld $16, %ymm12, %ymm15
vpsrld $16, %ymm10, %ymm10
vpand %ymm12, %ymm3, %ymm12
vpor %ymm14, %ymm15, %ymm14
vpor %ymm10, %ymm12, %ymm10
vpand %ymm6, %ymm2, %ymm12
vpslld $16, %ymm8, %ymm15
vpsrld $16, %ymm6, %ymm6
vpand %ymm8, %ymm3, %ymm8
vpor %ymm12, %ymm15, %ymm12
vpor %ymm6, %ymm8, %ymm6
vpand %ymm7, %ymm2, %ymm8
vpslld $16, %ymm9, %ymm15
vpsrld $16, %ymm7, %ymm7
vpand %ymm9, %ymm3, %ymm9
vpor %ymm8, %ymm15, %ymm8
vpor %ymm7, %ymm9, %ymm7
# layer 3 (mask4/mask5, 8-bit shifts)
vpand %ymm13, %ymm4, %ymm9
vpsllw $8, %ymm14, %ymm15
vpsrlw $8, %ymm13, %ymm13
vpand %ymm14, %ymm5, %ymm14
vpor %ymm9, %ymm15, %ymm9
vpor %ymm13, %ymm14, %ymm13
vpand %ymm11, %ymm4, %ymm14
vpsllw $8, %ymm10, %ymm15
vpsrlw $8, %ymm11, %ymm11
vpand %ymm10, %ymm5, %ymm10
vpor %ymm14, %ymm15, %ymm14
vpor %ymm11, %ymm10, %ymm10
vpand %ymm12, %ymm4, %ymm11
vpsllw $8, %ymm8, %ymm15
vpsrlw $8, %ymm12, %ymm12
vpand %ymm8, %ymm5, %ymm8
vpor %ymm11, %ymm15, %ymm11
vpor %ymm12, %ymm8, %ymm8
vpand %ymm6, %ymm4, %ymm12
vpsllw $8, %ymm7, %ymm15
vpsrlw $8, %ymm6, %ymm6
vpand %ymm7, %ymm5, %ymm7
vpor %ymm12, %ymm15, %ymm12
vpor %ymm6, %ymm7, %ymm6
# store the block at byte offset 192
vmovupd %ymm9, 192(%rdi)
vmovupd %ymm13, 448(%rdi)
vmovupd %ymm14, 704(%rdi)
vmovupd %ymm10, 960(%rdi)
vmovupd %ymm11, 1216(%rdi)
vmovupd %ymm8, 1472(%rdi)
vmovupd %ymm12, 1728(%rdi)
vmovupd %ymm6, 1984(%rdi)
# load x0..x6 of the column block at byte offset 224
vmovupd 224(%rdi), %ymm6
vmovupd 480(%rdi), %ymm7
vmovupd 736(%rdi), %ymm8
vmovupd 992(%rdi), %ymm9
vmovupd 1248(%rdi), %ymm10
vmovupd 1504(%rdi), %ymm11
vmovupd 1760(%rdi), %ymm12
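# Last column block of this pass (byte offset 224; its loads reach 2048, the
# end of the region touched here). From this point the generated code reuses
# the mask registers %ymm0..%ymm4 as scratch once each mask has served its
# last use, so the register assignment below differs from the blocks above.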
vmovupd 2016(%rdi), %ymm13
# layer 1, pairs (x0,x4) (x1,x5) (x2,x6) (x3,x7); the (x3,x7) pair retires
# mask0/mask1 (%ymm0/%ymm1) into scratch
vpand %ymm6, %ymm0, %ymm14
vpsllq $32, %ymm10, %ymm15
vpsrlq $32, %ymm6, %ymm6
vpand %ymm10, %ymm1, %ymm10
vpor %ymm14, %ymm15, %ymm14
vpor %ymm6, %ymm10, %ymm6
vpand %ymm7, %ymm0, %ymm10
vpsllq $32, %ymm11, %ymm15
vpsrlq $32, %ymm7, %ymm7
vpand %ymm11, %ymm1, %ymm11
vpor %ymm10, %ymm15, %ymm10
vpor %ymm7, %ymm11, %ymm7
vpand %ymm8, %ymm0, %ymm11
vpsllq $32, %ymm12, %ymm15
vpsrlq $32, %ymm8, %ymm8
vpand %ymm12, %ymm1, %ymm12
vpor %ymm11, %ymm15, %ymm11
vpor %ymm8, %ymm12, %ymm8
vpand %ymm9, %ymm0, %ymm0
vpsllq $32, %ymm13, %ymm12
vpsrlq $32, %ymm9, %ymm9
vpand %ymm13, %ymm1, %ymm1
vpor %ymm0, %ymm12, %ymm0
vpor %ymm9, %ymm1, %ymm1
# layer 2, pairs (x0,x2) (x1,x3) (x4,x6) (x5,x7); mask2 (%ymm2) retired likewise
vpand %ymm14, %ymm2, %ymm9
vpslld $16, %ymm11, %ymm12
vpsrld $16, %ymm14, %ymm13
vpand %ymm11, %ymm3, %ymm11
vpor %ymm9, %ymm12, %ymm9
vpor %ymm13, %ymm11, %ymm11
vpand %ymm10, %ymm2, %ymm12
vpslld $16, %ymm0, %ymm13
vpsrld $16, %ymm10, %ymm10
vpand %ymm0, %ymm3, %ymm0
vpor %ymm12, %ymm13, %ymm12
vpor %ymm10, %ymm0, %ymm0
vpand %ymm6, %ymm2, %ymm10
vpslld $16, %ymm8, %ymm13
vpsrld $16, %ymm6, %ymm6
vpand %ymm8, %ymm3, %ymm8
vpor %ymm10, %ymm13, %ymm10
vpor %ymm6, %ymm8, %ymm6
vpand %ymm7, %ymm2, %ymm2
vpslld $16, %ymm1, %ymm8
vpsrld $16, %ymm7, %ymm7
vpand %ymm1, %ymm3, %ymm1
vpor %ymm2, %ymm8, %ymm2
vpor %ymm7, %ymm1, %ymm1
# layer 3, pairs (x0,x1) (x2,x3) (x4,x5) (x6,x7); mask3/mask4 retired likewise
vpand %ymm9, %ymm4, %ymm3
vpsllw $8, %ymm12, %ymm7
vpsrlw $8, %ymm9, %ymm8
vpand %ymm12, %ymm5, %ymm9
vpor %ymm3, %ymm7, %ymm3
vpor %ymm8, %ymm9, %ymm7
vpand %ymm11, %ymm4, %ymm8
vpsllw $8, %ymm0, %ymm9
vpsrlw $8, %ymm11, %ymm11
vpand %ymm0, %ymm5, %ymm0
vpor %ymm8, %ymm9, %ymm8
vpor %ymm11, %ymm0, %ymm0
vpand %ymm10, %ymm4, %ymm9
vpsllw $8, %ymm2, %ymm11
vpsrlw $8, %ymm10, %ymm10
vpand %ymm2, %ymm5, %ymm2
vpor %ymm9, %ymm11, %ymm9
vpor %ymm10, %ymm2, %ymm2
vpand %ymm6, %ymm4, %ymm4
# 16x v10 = x7 << 8
# asm
2: vpsllw $8,<x7=%ymm1,>v10=%ymm10 vpsllw $8, % ymm1, % ymm10 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1 vpand % ymm1, % ymm5, % ymm1 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#5,<v10=reg256#11,>x6=reg256#5 # asm 2: vpor <v00=%ymm4,<v10=%ymm10,>x6=%ymm4 vpor % ymm4, % ymm10, % ymm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1 vpor % ymm6, % ymm1, % ymm1 # qhasm: mem256[ input_0 + 224 ] = x0 # asm 1: vmovupd <x0=reg256#4,224(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm3,224(<input_0=%rdi) vmovupd % ymm3, 224( % rdi) # qhasm: mem256[ input_0 + 480 ] = x1 # asm 1: vmovupd <x1=reg256#8,480(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm7,480(<input_0=%rdi) vmovupd % ymm7, 480( % rdi) # qhasm: mem256[ input_0 + 736 ] = x2 # asm 1: vmovupd <x2=reg256#9,736(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm8,736(<input_0=%rdi) vmovupd % ymm8, 736( % rdi) # qhasm: mem256[ input_0 + 992 ] = x3 # asm 1: vmovupd <x3=reg256#1,992(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm0,992(<input_0=%rdi) vmovupd % ymm0, 992( % rdi) # qhasm: mem256[ input_0 + 1248 ] = x4 # asm 1: vmovupd <x4=reg256#10,1248(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm9,1248(<input_0=%rdi) vmovupd % ymm9, 1248( % rdi) # qhasm: mem256[ input_0 + 1504 ] = x5 # asm 1: vmovupd <x5=reg256#3,1504(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm2,1504(<input_0=%rdi) vmovupd % ymm2, 1504( % rdi) # qhasm: mem256[ input_0 + 1760 ] = x6 # asm 1: vmovupd <x6=reg256#5,1760(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm4,1760(<input_0=%rdi) vmovupd % ymm4, 1760( % rdi) # qhasm: mem256[ input_0 + 2016 ] = x7 # asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi) vmovupd % ymm1, 2016( % rdi) # qhasm: mask0 aligned= mem256[ MASK2_0 ] # asm 1: vmovapd MASK2_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK2_0(%rip),>mask0=%ymm0 vmovapd MASK2_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK2_1 ] # asm 1: vmovapd MASK2_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK2_1(%rip),>mask1=%ymm1 vmovapd MASK2_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK1_0 ] # asm 1: vmovapd MASK1_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK1_0(%rip),>mask2=%ymm2 vmovapd MASK1_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK1_1 ] # asm 1: vmovapd MASK1_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK1_1(%rip),>mask3=%ymm3 vmovapd MASK1_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK0_0 ] # asm 1: vmovapd MASK0_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK0_0(%rip),>mask4=%ymm4 vmovapd MASK0_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK0_1 ] # asm 1: vmovapd MASK0_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK0_1(%rip),>mask5=%ymm5 vmovapd MASK0_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 32(<input_0=%rdi),>x1=%ymm7 vmovupd 32( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 64(<input_0=%rdi),>x2=%ymm8 vmovupd 64( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 96 ] # asm 1: vmovupd 
96(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 96(<input_0=%rdi),>x3=%ymm9 vmovupd 96( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 128(<input_0=%rdi),>x4=%ymm10 vmovupd 128( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 160 ] # asm 1: vmovupd 160(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 160(<input_0=%rdi),>x5=%ymm11 vmovupd 160( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 192(<input_0=%rdi),>x6=%ymm12 vmovupd 192( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 224(<input_0=%rdi),>x7=%ymm13 vmovupd 224( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand 
<x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: 
vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 
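
# Note: the vpand/vpsllq/vpsrlq/vpor ladders in this routine appear to
# implement the classic in-place bit-matrix transpose (butterfly) network.
# Each step pairs two rows, keeps the bits selected by one mask of a
# complementary pair, and swaps the remaining bits with the partner row at
# a fixed shift distance. The pass over the 256-byte-strided rows above
# used distances 32/16/8 (there the shift itself discards the unwanted
# bits, so v10/v01 need no extra mask); the passes over the 32-byte-strided
# rows use distance 4 (mask0/mask1 loaded from MASK2_*), 2 (mask2/mask3
# from MASK1_*) and 1 (mask4/mask5 from MASK0_*). A sketch of one
# distance-1 exchange on a row pair (x0, x1), writing m0 for the mask of
# the bits kept in place in x0 and m1 for its complement (illustrative
# names only, not registers used below):
#
#     x0' = (x0 & m0) | ((x1 & m0) << 1)
#     x1' = ((x0 & m1) >> 1) | (x1 & m1)
#
# The two vpor instructions just below complete the distance-2 step for
# the x5/x7 pair; the distance-1 steps follow.
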
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12

# qhasm: v11 = x5 & mask5
# asm
1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 32 ] = x1 # asm 1: vmovupd <x1=reg256#14,32(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,32(<input_0=%rdi) vmovupd % ymm13, 32( % rdi) # qhasm: mem256[ input_0 + 64 ] = x2 # asm 1: vmovupd <x2=reg256#15,64(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,64(<input_0=%rdi) vmovupd % ymm14, 64( % rdi) # qhasm: mem256[ input_0 + 96 ] = x3 # asm 1: vmovupd <x3=reg256#11,96(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,96(<input_0=%rdi) vmovupd % ymm10, 96( % rdi) # qhasm: mem256[ input_0 + 128 ] = x4 # asm 1: vmovupd <x4=reg256#12,128(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,128(<input_0=%rdi) vmovupd % ymm11, 128( % rdi) # qhasm: mem256[ input_0 + 160 ] = x5 # asm 1: vmovupd <x5=reg256#9,160(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,160(<input_0=%rdi) vmovupd % ymm8, 160( % rdi) # qhasm: mem256[ input_0 + 192 ] = x6 # asm 1: vmovupd <x6=reg256#13,192(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,192(<input_0=%rdi) vmovupd % ymm12, 192( % rdi) # qhasm: mem256[ input_0 + 224 ] = x7 # asm 1: vmovupd <x7=reg256#7,224(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,224(<input_0=%rdi) vmovupd % ymm6, 224( % rdi) # qhasm: x0 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 256(<input_0=%rdi),>x0=%ymm6 vmovupd 256( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 320 ] # asm 1: vmovupd 
320(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 320(<input_0=%rdi),>x2=%ymm8 vmovupd 320( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 352(<input_0=%rdi),>x3=%ymm9 vmovupd 352( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 384(<input_0=%rdi),>x4=%ymm10 vmovupd 384( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 416 ] # asm 1: vmovupd 416(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 416(<input_0=%rdi),>x5=%ymm11 vmovupd 416( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 448 ] # asm 1: vmovupd 448(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 448(<input_0=%rdi),>x6=%ymm12 vmovupd 448( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 480 ] # asm 1: vmovupd 480(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 480(<input_0=%rdi),>x7=%ymm13 vmovupd 480( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand 
<x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: 
vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % 
ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: 
vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 256 ] = x0 # asm 1: vmovupd <x0=reg256#10,256(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,256(<input_0=%rdi) vmovupd % ymm9, 256( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 320 ] = x2 # asm 1: vmovupd <x2=reg256#15,320(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,320(<input_0=%rdi) vmovupd % ymm14, 320( % rdi) # qhasm: mem256[ input_0 + 352 ] = x3 # asm 1: vmovupd <x3=reg256#11,352(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,352(<input_0=%rdi) vmovupd % ymm10, 352( % rdi) # qhasm: mem256[ input_0 + 384 ] = x4 # asm 1: vmovupd <x4=reg256#12,384(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,384(<input_0=%rdi) vmovupd % ymm11, 384( % rdi) # qhasm: mem256[ input_0 + 416 ] = x5 # asm 1: vmovupd <x5=reg256#9,416(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,416(<input_0=%rdi) vmovupd % ymm8, 416( % rdi) # qhasm: mem256[ input_0 + 448 ] = x6 # asm 1: vmovupd <x6=reg256#13,448(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,448(<input_0=%rdi) vmovupd % ymm12, 448( % rdi) # qhasm: mem256[ input_0 + 480 ] = x7 # asm 1: vmovupd <x7=reg256#7,480(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,480(<input_0=%rdi) vmovupd % ymm6, 480( % rdi) # qhasm: x0 = mem256[ input_0 + 512 ] # asm 1: vmovupd 512(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 512(<input_0=%rdi),>x0=%ymm6 vmovupd 512( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 544 ] # asm 
1: vmovupd 544(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 544(<input_0=%rdi),>x1=%ymm7 vmovupd 544( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 608(<input_0=%rdi),>x3=%ymm9 vmovupd 608( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 640 ] # asm 1: vmovupd 640(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 640(<input_0=%rdi),>x4=%ymm10 vmovupd 640( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 672 ] # asm 1: vmovupd 672(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 672(<input_0=%rdi),>x5=%ymm11 vmovupd 672( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 704 ] # asm 1: vmovupd 704(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 704(<input_0=%rdi),>x6=%ymm12 vmovupd 704( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 736 ] # asm 1: vmovupd 736(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 736(<input_0=%rdi),>x7=%ymm13 vmovupd 736( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor 
<v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # 
asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % 
ymm3, % ymm7
vpand %ymm9, %ymm3, %ymm9  # qhasm: v11 = x7 & mask3
vpsrlq $2, %ymm7, %ymm7  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm8, %ymm15, %ymm8  # qhasm: x5 = v00 | v10
vpor %ymm7, %ymm9, %ymm7  # qhasm: x7 = v01 | v11

vpand %ymm13, %ymm4, %ymm9  # qhasm: v00 = x0 & mask4
vpand %ymm14, %ymm4, %ymm15  # qhasm: v10 = x1 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm13, %ymm5, %ymm13  # qhasm: v01 = x0 & mask5
vpand %ymm14, %ymm5, %ymm14  # qhasm: v11 = x1 & mask5
vpsrlq $1, %ymm13, %ymm13  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm9, %ymm15, %ymm9  # qhasm: x0 = v00 | v10
vpor %ymm13, %ymm14, %ymm13  # qhasm: x1 = v01 | v11
vpand %ymm11, %ymm4, %ymm14  # qhasm: v00 = x2 & mask4
vpand %ymm10, %ymm4, %ymm15  # qhasm: v10 = x3 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm11, %ymm5, %ymm11  # qhasm: v01 = x2 & mask5
vpand %ymm10, %ymm5, %ymm10  # qhasm: v11 = x3 & mask5
vpsrlq $1, %ymm11, %ymm11  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm14, %ymm15, %ymm14  # qhasm: x2 = v00 | v10
vpor %ymm11, %ymm10, %ymm10  # qhasm: x3 = v01 | v11
vpand %ymm12, %ymm4, %ymm11  # qhasm: v00 = x4 & mask4
vpand %ymm8, %ymm4, %ymm15  # qhasm: v10 = x5 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm12, %ymm5, %ymm12  # qhasm: v01 = x4 & mask5
vpand %ymm8, %ymm5, %ymm8  # qhasm: v11 = x5 & mask5
vpsrlq $1, %ymm12, %ymm12  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm11, %ymm15, %ymm11  # qhasm: x4 = v00 | v10
vpor %ymm12, %ymm8, %ymm8  # qhasm: x5 = v01 | v11
vpand %ymm6, %ymm4, %ymm12  # qhasm: v00 = x6 & mask4
vpand %ymm7, %ymm4, %ymm15  # qhasm: v10 = x7 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm6, %ymm5, %ymm6  # qhasm: v01 = x6 & mask5
vpand %ymm7, %ymm5, %ymm7  # qhasm: v11 = x7 & mask5
vpsrlq $1, %ymm6, %ymm6  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm12, %ymm15, %ymm12  # qhasm: x6 = v00 | v10
vpor %ymm6, %ymm7, %ymm6  # qhasm: x7 = v01 | v11

vmovupd %ymm9, 512(%rdi)  # qhasm: mem256[ input_0 + 512 ] = x0
vmovupd %ymm13, 544(%rdi)  # qhasm: mem256[ input_0 + 544 ] = x1
vmovupd %ymm14, 576(%rdi)  # qhasm: mem256[ input_0 + 576 ] = x2
vmovupd %ymm10, 608(%rdi)  # qhasm: mem256[ input_0 + 608 ] = x3
vmovupd %ymm11, 640(%rdi)  # qhasm: mem256[ input_0 + 640 ] = x4
vmovupd %ymm8, 672(%rdi)  # qhasm: mem256[ input_0 + 672 ] = x5
vmovupd %ymm12, 704(%rdi)  # qhasm: mem256[ input_0 + 704 ] = x6
vmovupd %ymm6, 736(%rdi)  # qhasm: mem256[ input_0 + 736 ] = x7
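
# The 256-byte chunk at input_0 + 768 below goes through the same three
# shift-and-OR rounds as the previous one: mask0/mask1 with 4-bit shifts
# on the row pairs (x0,x4)..(x3,x7), mask2/mask3 with 2-bit shifts on
# (x0,x2),(x1,x3),(x4,x6),(x5,x7), and mask4/mask5 with 1-bit shifts on
# (x0,x1),(x2,x3),(x4,x5),(x6,x7).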
vmovupd 768(%rdi), %ymm6  # qhasm: x0 = mem256[ input_0 + 768 ]
vmovupd 800(%rdi), %ymm7  # qhasm: x1 = mem256[ input_0 + 800 ]
vmovupd 832(%rdi), %ymm8  # qhasm: x2 = mem256[ input_0 + 832 ]
vmovupd 864(%rdi), %ymm9  # qhasm: x3 = mem256[ input_0 + 864 ]
vmovupd 896(%rdi), %ymm10  # qhasm: x4 = mem256[ input_0 + 896 ]
vmovupd 928(%rdi), %ymm11  # qhasm: x5 = mem256[ input_0 + 928 ]
vmovupd 960(%rdi), %ymm12  # qhasm: x6 = mem256[ input_0 + 960 ]
vmovupd 992(%rdi), %ymm13  # qhasm: x7 = mem256[ input_0 + 992 ]

vpand %ymm6, %ymm0, %ymm14  # qhasm: v00 = x0 & mask0
vpand %ymm10, %ymm0, %ymm15  # qhasm: v10 = x4 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm6, %ymm1, %ymm6  # qhasm: v01 = x0 & mask1
vpand %ymm10, %ymm1, %ymm10  # qhasm: v11 = x4 & mask1
vpsrlq $4, %ymm6, %ymm6  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm14, %ymm15, %ymm14  # qhasm: x0 = v00 | v10
vpor %ymm6, %ymm10, %ymm6  # qhasm: x4 = v01 | v11
vpand %ymm7, %ymm0, %ymm10  # qhasm: v00 = x1 & mask0
vpand %ymm11, %ymm0, %ymm15  # qhasm: v10 = x5 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm7, %ymm1, %ymm7  # qhasm: v01 = x1 & mask1
vpand %ymm11, %ymm1, %ymm11  # qhasm: v11 = x5 & mask1
vpsrlq $4, %ymm7, %ymm7  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm10, %ymm15, %ymm10  # qhasm: x1 = v00 | v10
vpor %ymm7, %ymm11, %ymm7  # qhasm: x5 = v01 | v11
vpand %ymm8, %ymm0, %ymm11  # qhasm: v00 = x2 & mask0
vpand %ymm12, %ymm0, %ymm15  # qhasm: v10 = x6 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm8, %ymm1, %ymm8  # qhasm: v01 = x2 & mask1
vpand %ymm12, %ymm1, %ymm12  # qhasm: v11 = x6 & mask1
vpsrlq $4, %ymm8, %ymm8  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm11, %ymm15, %ymm11  # qhasm: x2 = v00 | v10
vpor %ymm8, %ymm12, %ymm8  # qhasm: x6 = v01 | v11
vpand %ymm9, %ymm0, %ymm12  # qhasm: v00 = x3 & mask0
vpand %ymm13, %ymm0, %ymm15  # qhasm: v10 = x7 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm9, %ymm1, %ymm9  # qhasm: v01 = x3 & mask1
vpand %ymm13, %ymm1, %ymm13  # qhasm: v11 = x7 & mask1
vpsrlq $4, %ymm9, %ymm9  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm12, %ymm15, %ymm12  # qhasm: x3 = v00 | v10
vpor %ymm9, %ymm13, %ymm9  # qhasm: x7 = v01 | v11

vpand %ymm14, %ymm2, %ymm13  # qhasm: v00 = x0 & mask2
vpand %ymm11, %ymm2, %ymm15  # qhasm: v10 = x2 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm14, %ymm3, %ymm14  # qhasm: v01 = x0 & mask3
vpand %ymm11, %ymm3, %ymm11  # qhasm: v11 = x2 & mask3
vpsrlq $2, %ymm14, %ymm14  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm13, %ymm15, %ymm13  # qhasm: x0 = v00 | v10
vpor %ymm14, %ymm11, %ymm11  # qhasm: x2 = v01 | v11
vpand %ymm10, %ymm2, %ymm14  # qhasm: v00 = x1 & mask2
vpand %ymm12, %ymm2, %ymm15  # qhasm: v10 = x3 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm10, %ymm3, %ymm10  # qhasm: v01 = x1 & mask3
vpand %ymm12, %ymm3, %ymm12  # qhasm: v11 = x3 & mask3
vpsrlq $2, %ymm10, %ymm10  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm14, %ymm15, %ymm14  # qhasm: x1 = v00 | v10
vpor %ymm10, %ymm12, %ymm10  # qhasm: x3 = v01 | v11
vpand %ymm6, %ymm2, %ymm12  # qhasm: v00 = x4 & mask2
vpand %ymm8, %ymm2, %ymm15  # qhasm: v10 = x6 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm6, %ymm3, %ymm6  # qhasm: v01 = x4 & mask3
vpand %ymm8, %ymm3, %ymm8  # qhasm: v11 = x6 & mask3
vpsrlq $2, %ymm6, %ymm6  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm12, %ymm15, %ymm12  # qhasm: x4 = v00 | v10
vpor %ymm6, %ymm8, %ymm6  # qhasm: x6 = v01 | v11
vpand %ymm7, %ymm2, %ymm8  # qhasm: v00 = x5 & mask2
vpand %ymm9, %ymm2, %ymm15  # qhasm: v10 = x7 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm7, %ymm3, %ymm7  # qhasm: v01 = x5 & mask3
vpand %ymm9, %ymm3, %ymm9  # qhasm: v11 = x7 & mask3
vpsrlq $2, %ymm7, %ymm7  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm8, %ymm15, %ymm8  # qhasm: x5 = v00 | v10
vpor %ymm7, %ymm9, %ymm7  # qhasm: x7 = v01 | v11

vpand %ymm13, %ymm4, %ymm9  # qhasm: v00 = x0 & mask4
vpand %ymm14, %ymm4, %ymm15  # qhasm: v10 = x1 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm13, %ymm5, %ymm13  # qhasm: v01 = x0 & mask5
vpand %ymm14, %ymm5, %ymm14  # qhasm: v11 = x1 & mask5
vpsrlq $1, %ymm13, %ymm13  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm9, %ymm15, %ymm9  # qhasm: x0 = v00 | v10
vpor %ymm13, %ymm14, %ymm13  # qhasm: x1 = v01 | v11
vpand %ymm11, %ymm4, %ymm14  # qhasm: v00 = x2 & mask4
vpand %ymm10, %ymm4, %ymm15  # qhasm: v10 = x3 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm11, %ymm5, %ymm11  # qhasm: v01 = x2 & mask5
vpand %ymm10, %ymm5, %ymm10  # qhasm: v11 = x3 & mask5
vpsrlq $1, %ymm11, %ymm11  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm14, %ymm15, %ymm14  # qhasm: x2 = v00 | v10
vpor %ymm11, %ymm10, %ymm10  # qhasm: x3 = v01 | v11
vpand %ymm12, %ymm4, %ymm11  # qhasm: v00 = x4 & mask4
vpand %ymm8, %ymm4, %ymm15  # qhasm: v10 = x5 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm12, %ymm5, %ymm12  # qhasm: v01 = x4 & mask5
vpand %ymm8, %ymm5, %ymm8  # qhasm: v11 = x5 & mask5
vpsrlq $1, %ymm12, %ymm12  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm11, %ymm15, %ymm11  # qhasm: x4 = v00 | v10
vpor %ymm12, %ymm8, %ymm8  # qhasm: x5 = v01 | v11
vpand %ymm6, %ymm4, %ymm12  # qhasm: v00 = x6 & mask4
vpand %ymm7, %ymm4, %ymm15  # qhasm: v10 = x7 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm6, %ymm5, %ymm6  # qhasm: v01 = x6 & mask5
vpand %ymm7, %ymm5, %ymm7  # qhasm: v11 = x7 & mask5
vpsrlq $1, %ymm6, %ymm6  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm12, %ymm15, %ymm12  # qhasm: x6 = v00 | v10
vpor %ymm6, %ymm7, %ymm6  # qhasm: x7 = v01 | v11

vmovupd %ymm9, 768(%rdi)  # qhasm: mem256[ input_0 + 768 ] = x0
vmovupd %ymm13, 800(%rdi)  # qhasm: mem256[ input_0 + 800 ] = x1
vmovupd %ymm14, 832(%rdi)  # qhasm: mem256[ input_0 + 832 ] = x2
vmovupd %ymm10, 864(%rdi)  # qhasm: mem256[ input_0 + 864 ] = x3
vmovupd %ymm11, 896(%rdi)  # qhasm: mem256[ input_0 + 896 ] = x4
vmovupd %ymm8, 928(%rdi)  # qhasm: mem256[ input_0 + 928 ] = x5
vmovupd %ymm12, 960(%rdi)  # qhasm: mem256[ input_0 + 960 ] = x6
vmovupd %ymm6, 992(%rdi)  # qhasm: mem256[ input_0 + 992 ] = x7
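
# Same three rounds, applied to the chunk at input_0 + 1024.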
vmovupd 1024(%rdi), %ymm6  # qhasm: x0 = mem256[ input_0 + 1024 ]
vmovupd 1056(%rdi), %ymm7  # qhasm: x1 = mem256[ input_0 + 1056 ]
vmovupd 1088(%rdi), %ymm8  # qhasm: x2 = mem256[ input_0 + 1088 ]
vmovupd 1120(%rdi), %ymm9  # qhasm: x3 = mem256[ input_0 + 1120 ]
vmovupd 1152(%rdi), %ymm10  # qhasm: x4 = mem256[ input_0 + 1152 ]
vmovupd 1184(%rdi), %ymm11  # qhasm: x5 = mem256[ input_0 + 1184 ]
vmovupd 1216(%rdi), %ymm12  # qhasm: x6 = mem256[ input_0 + 1216 ]
vmovupd 1248(%rdi), %ymm13  # qhasm: x7 = mem256[ input_0 + 1248 ]

vpand %ymm6, %ymm0, %ymm14  # qhasm: v00 = x0 & mask0
vpand %ymm10, %ymm0, %ymm15  # qhasm: v10 = x4 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm6, %ymm1, %ymm6  # qhasm: v01 = x0 & mask1
vpand %ymm10, %ymm1, %ymm10  # qhasm: v11 = x4 & mask1
vpsrlq $4, %ymm6, %ymm6  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm14, %ymm15, %ymm14  # qhasm: x0 = v00 | v10
vpor %ymm6, %ymm10, %ymm6  # qhasm: x4 = v01 | v11
vpand %ymm7, %ymm0, %ymm10  # qhasm: v00 = x1 & mask0
vpand %ymm11, %ymm0, %ymm15  # qhasm: v10 = x5 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm7, %ymm1, %ymm7  # qhasm: v01 = x1 & mask1
vpand %ymm11, %ymm1, %ymm11  # qhasm: v11 = x5 & mask1
vpsrlq $4, %ymm7, %ymm7  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm10, %ymm15, %ymm10  # qhasm: x1 = v00 | v10
vpor %ymm7, %ymm11, %ymm7  # qhasm: x5 = v01 | v11
vpand %ymm8, %ymm0, %ymm11  # qhasm: v00 = x2 & mask0
vpand %ymm12, %ymm0, %ymm15  # qhasm: v10 = x6 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm8, %ymm1, %ymm8  # qhasm: v01 = x2 & mask1
vpand %ymm12, %ymm1, %ymm12  # qhasm: v11 = x6 & mask1
vpsrlq $4, %ymm8, %ymm8  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm11, %ymm15, %ymm11  # qhasm: x2 = v00 | v10
vpor %ymm8, %ymm12, %ymm8  # qhasm: x6 = v01 | v11
vpand %ymm9, %ymm0, %ymm12  # qhasm: v00 = x3 & mask0
vpand %ymm13, %ymm0, %ymm15  # qhasm: v10 = x7 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm9, %ymm1, %ymm9  # qhasm: v01 = x3 & mask1
vpand %ymm13, %ymm1, %ymm13  # qhasm: v11 = x7 & mask1
vpsrlq $4, %ymm9, %ymm9  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm12, %ymm15, %ymm12  # qhasm: x3 = v00 | v10
vpor %ymm9, %ymm13, %ymm9  # qhasm: x7 = v01 | v11

vpand %ymm14, %ymm2, %ymm13  # qhasm: v00 = x0 & mask2
vpand %ymm11, %ymm2, %ymm15  # qhasm: v10 = x2 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm14, %ymm3, %ymm14  # qhasm: v01 = x0 & mask3
vpand %ymm11, %ymm3, %ymm11  # qhasm: v11 = x2 & mask3
vpsrlq $2, %ymm14, %ymm14  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm13, %ymm15, %ymm13  # qhasm: x0 = v00 | v10
vpor %ymm14, %ymm11, %ymm11  # qhasm: x2 = v01 | v11
vpand %ymm10, %ymm2, %ymm14  # qhasm: v00 = x1 & mask2
vpand %ymm12, %ymm2, %ymm15  # qhasm: v10 = x3 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm10, %ymm3, %ymm10  # qhasm: v01 = x1 & mask3
vpand %ymm12, %ymm3, %ymm12  # qhasm: v11 = x3 & mask3
vpsrlq $2, %ymm10, %ymm10  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm14, %ymm15, %ymm14  # qhasm: x1 = v00 | v10
vpor %ymm10, %ymm12, %ymm10  # qhasm: x3 = v01 | v11
vpand %ymm6, %ymm2, %ymm12  # qhasm: v00 = x4 & mask2
vpand %ymm8, %ymm2, %ymm15  # qhasm: v10 = x6 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm6, %ymm3, %ymm6  # qhasm: v01 = x4 & mask3
vpand %ymm8, %ymm3, %ymm8  # qhasm: v11 = x6 & mask3
vpsrlq $2, %ymm6, %ymm6  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm12, %ymm15, %ymm12  # qhasm: x4 = v00 | v10
vpor %ymm6, %ymm8, %ymm6  # qhasm: x6 = v01 | v11
vpand %ymm7, %ymm2, %ymm8  # qhasm: v00 = x5 & mask2
vpand %ymm9, %ymm2, %ymm15  # qhasm: v10 = x7 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm7, %ymm3, %ymm7  # qhasm: v01 = x5 & mask3
vpand %ymm9, %ymm3, %ymm9  # qhasm: v11 = x7 & mask3
vpsrlq $2, %ymm7, %ymm7  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm8, %ymm15, %ymm8  # qhasm: x5 = v00 | v10
vpor %ymm7, %ymm9, %ymm7  # qhasm: x7 = v01 | v11

vpand %ymm13, %ymm4, %ymm9  # qhasm: v00 = x0 & mask4
vpand %ymm14, %ymm4, %ymm15  # qhasm: v10 = x1 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm13, %ymm5, %ymm13  # qhasm: v01 = x0 & mask5
vpand %ymm14, %ymm5, %ymm14  # qhasm: v11 = x1 & mask5
vpsrlq $1, %ymm13, %ymm13  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm9, %ymm15, %ymm9  # qhasm: x0 = v00 | v10
vpor %ymm13, %ymm14, %ymm13  # qhasm: x1 = v01 | v11
vpand %ymm11, %ymm4, %ymm14  # qhasm: v00 = x2 & mask4
vpand %ymm10, %ymm4, %ymm15  # qhasm: v10 = x3 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm11, %ymm5, %ymm11  # qhasm: v01 = x2 & mask5
vpand %ymm10, %ymm5, %ymm10  # qhasm: v11 = x3 & mask5
vpsrlq $1, %ymm11, %ymm11  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm14, %ymm15, %ymm14  # qhasm: x2 = v00 | v10
vpor %ymm11, %ymm10, %ymm10  # qhasm: x3 = v01 | v11
vpand %ymm12, %ymm4, %ymm11  # qhasm: v00 = x4 & mask4
vpand %ymm8, %ymm4, %ymm15  # qhasm: v10 = x5 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm12, %ymm5, %ymm12  # qhasm: v01 = x4 & mask5
vpand %ymm8, %ymm5, %ymm8  # qhasm: v11 = x5 & mask5
vpsrlq $1, %ymm12, %ymm12  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm11, %ymm15, %ymm11  # qhasm: x4 = v00 | v10
vpor %ymm12, %ymm8, %ymm8  # qhasm: x5 = v01 | v11
vpand %ymm6, %ymm4, %ymm12  # qhasm: v00 = x6 & mask4
vpand %ymm7, %ymm4, %ymm15  # qhasm: v10 = x7 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm6, %ymm5, %ymm6  # qhasm: v01 = x6 & mask5
vpand %ymm7, %ymm5, %ymm7  # qhasm: v11 = x7 & mask5
vpsrlq $1, %ymm6, %ymm6  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm12, %ymm15, %ymm12  # qhasm: x6 = v00 | v10
vpor %ymm6, %ymm7, %ymm6  # qhasm: x7 = v01 | v11

vmovupd %ymm9, 1024(%rdi)  # qhasm: mem256[ input_0 + 1024 ] = x0
vmovupd %ymm13, 1056(%rdi)  # qhasm: mem256[ input_0 + 1056 ] = x1
vmovupd %ymm14, 1088(%rdi)  # qhasm: mem256[ input_0 + 1088 ] = x2
vmovupd %ymm10, 1120(%rdi)  # qhasm: mem256[ input_0 + 1120 ] = x3
vmovupd %ymm11, 1152(%rdi)  # qhasm: mem256[ input_0 + 1152 ] = x4
vmovupd %ymm8, 1184(%rdi)  # qhasm: mem256[ input_0 + 1184 ] = x5
vmovupd %ymm12, 1216(%rdi)  # qhasm: mem256[ input_0 + 1216 ] = x6
vmovupd %ymm6, 1248(%rdi)  # qhasm: mem256[ input_0 + 1248 ] = x7
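
# Same three rounds, applied to the chunk at input_0 + 1280.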
vmovupd 1280(%rdi), %ymm6  # qhasm: x0 = mem256[ input_0 + 1280 ]
vmovupd 1312(%rdi), %ymm7  # qhasm: x1 = mem256[ input_0 + 1312 ]
vmovupd 1344(%rdi), %ymm8  # qhasm: x2 = mem256[ input_0 + 1344 ]
vmovupd 1376(%rdi), %ymm9  # qhasm: x3 = mem256[ input_0 + 1376 ]
vmovupd 1408(%rdi), %ymm10  # qhasm: x4 = mem256[ input_0 + 1408 ]
vmovupd 1440(%rdi), %ymm11  # qhasm: x5 = mem256[ input_0 + 1440 ]
vmovupd 1472(%rdi), %ymm12  # qhasm: x6 = mem256[ input_0 + 1472 ]
vmovupd 1504(%rdi), %ymm13  # qhasm: x7 = mem256[ input_0 + 1504 ]

vpand %ymm6, %ymm0, %ymm14  # qhasm: v00 = x0 & mask0
vpand %ymm10, %ymm0, %ymm15  # qhasm: v10 = x4 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm6, %ymm1, %ymm6  # qhasm: v01 = x0 & mask1
vpand %ymm10, %ymm1, %ymm10  # qhasm: v11 = x4 & mask1
vpsrlq $4, %ymm6, %ymm6  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm14, %ymm15, %ymm14  # qhasm: x0 = v00 | v10
vpor %ymm6, %ymm10, %ymm6  # qhasm: x4 = v01 | v11
vpand %ymm7, %ymm0, %ymm10  # qhasm: v00 = x1 & mask0
vpand %ymm11, %ymm0, %ymm15  # qhasm: v10 = x5 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm7, %ymm1, %ymm7  # qhasm: v01 = x1 & mask1
vpand %ymm11, %ymm1, %ymm11  # qhasm: v11 = x5 & mask1
vpsrlq $4, %ymm7, %ymm7  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm10, %ymm15, %ymm10  # qhasm: x1 = v00 | v10
vpor %ymm7, %ymm11, %ymm7  # qhasm: x5 = v01 | v11
vpand %ymm8, %ymm0, %ymm11  # qhasm: v00 = x2 & mask0
vpand %ymm12, %ymm0, %ymm15  # qhasm: v10 = x6 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm8, %ymm1, %ymm8  # qhasm: v01 = x2 & mask1
vpand %ymm12, %ymm1, %ymm12  # qhasm: v11 = x6 & mask1
vpsrlq $4, %ymm8, %ymm8  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm11, %ymm15, %ymm11  # qhasm: x2 = v00 | v10
vpor %ymm8, %ymm12, %ymm8  # qhasm: x6 = v01 | v11
vpand %ymm9, %ymm0, %ymm12  # qhasm: v00 = x3 & mask0
vpand %ymm13, %ymm0, %ymm15  # qhasm: v10 = x7 & mask0
vpsllq $4, %ymm15, %ymm15  # qhasm: 4x v10 <<= 4
vpand %ymm9, %ymm1, %ymm9  # qhasm: v01 = x3 & mask1
vpand %ymm13, %ymm1, %ymm13  # qhasm: v11 = x7 & mask1
vpsrlq $4, %ymm9, %ymm9  # qhasm: 4x v01 unsigned>>= 4
vpor %ymm12, %ymm15, %ymm12  # qhasm: x3 = v00 | v10
vpor %ymm9, %ymm13, %ymm9  # qhasm: x7 = v01 | v11

vpand %ymm14, %ymm2, %ymm13  # qhasm: v00 = x0 & mask2
vpand %ymm11, %ymm2, %ymm15  # qhasm: v10 = x2 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm14, %ymm3, %ymm14  # qhasm: v01 = x0 & mask3
vpand %ymm11, %ymm3, %ymm11  # qhasm: v11 = x2 & mask3
vpsrlq $2, %ymm14, %ymm14  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm13, %ymm15, %ymm13  # qhasm: x0 = v00 | v10
vpor %ymm14, %ymm11, %ymm11  # qhasm: x2 = v01 | v11
vpand %ymm10, %ymm2, %ymm14  # qhasm: v00 = x1 & mask2
vpand %ymm12, %ymm2, %ymm15  # qhasm: v10 = x3 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm10, %ymm3, %ymm10  # qhasm: v01 = x1 & mask3
vpand %ymm12, %ymm3, %ymm12  # qhasm: v11 = x3 & mask3
vpsrlq $2, %ymm10, %ymm10  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm14, %ymm15, %ymm14  # qhasm: x1 = v00 | v10
vpor %ymm10, %ymm12, %ymm10  # qhasm: x3 = v01 | v11
vpand %ymm6, %ymm2, %ymm12  # qhasm: v00 = x4 & mask2
vpand %ymm8, %ymm2, %ymm15  # qhasm: v10 = x6 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm6, %ymm3, %ymm6  # qhasm: v01 = x4 & mask3
vpand %ymm8, %ymm3, %ymm8  # qhasm: v11 = x6 & mask3
vpsrlq $2, %ymm6, %ymm6  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm12, %ymm15, %ymm12  # qhasm: x4 = v00 | v10
vpor %ymm6, %ymm8, %ymm6  # qhasm: x6 = v01 | v11
vpand %ymm7, %ymm2, %ymm8  # qhasm: v00 = x5 & mask2
vpand %ymm9, %ymm2, %ymm15  # qhasm: v10 = x7 & mask2
vpsllq $2, %ymm15, %ymm15  # qhasm: 4x v10 <<= 2
vpand %ymm7, %ymm3, %ymm7  # qhasm: v01 = x5 & mask3
vpand %ymm9, %ymm3, %ymm9  # qhasm: v11 = x7 & mask3
vpsrlq $2, %ymm7, %ymm7  # qhasm: 4x v01 unsigned>>= 2
vpor %ymm8, %ymm15, %ymm8  # qhasm: x5 = v00 | v10
vpor %ymm7, %ymm9, %ymm7  # qhasm: x7 = v01 | v11

vpand %ymm13, %ymm4, %ymm9  # qhasm: v00 = x0 & mask4
vpand %ymm14, %ymm4, %ymm15  # qhasm: v10 = x1 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm13, %ymm5, %ymm13  # qhasm: v01 = x0 & mask5
vpand %ymm14, %ymm5, %ymm14  # qhasm: v11 = x1 & mask5
vpsrlq $1, %ymm13, %ymm13  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm9, %ymm15, %ymm9  # qhasm: x0 = v00 | v10
vpor %ymm13, %ymm14, %ymm13  # qhasm: x1 = v01 | v11
vpand %ymm11, %ymm4, %ymm14  # qhasm: v00 = x2 & mask4
vpand %ymm10, %ymm4, %ymm15  # qhasm: v10 = x3 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm11, %ymm5, %ymm11  # qhasm: v01 = x2 & mask5
vpand %ymm10, %ymm5, %ymm10  # qhasm: v11 = x3 & mask5
vpsrlq $1, %ymm11, %ymm11  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm14, %ymm15, %ymm14  # qhasm: x2 = v00 | v10
vpor %ymm11, %ymm10, %ymm10  # qhasm: x3 = v01 | v11
vpand %ymm12, %ymm4, %ymm11  # qhasm: v00 = x4 & mask4
vpand %ymm8, %ymm4, %ymm15  # qhasm: v10 = x5 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm12, %ymm5, %ymm12  # qhasm: v01 = x4 & mask5
vpand %ymm8, %ymm5, %ymm8  # qhasm: v11 = x5 & mask5
vpsrlq $1, %ymm12, %ymm12  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm11, %ymm15, %ymm11  # qhasm: x4 = v00 | v10
vpor %ymm12, %ymm8, %ymm8  # qhasm: x5 = v01 | v11
vpand %ymm6, %ymm4, %ymm12  # qhasm: v00 = x6 & mask4
vpand %ymm7, %ymm4, %ymm15  # qhasm: v10 = x7 & mask4
vpsllq $1, %ymm15, %ymm15  # qhasm: 4x v10 <<= 1
vpand %ymm6, %ymm5, %ymm6  # qhasm: v01 = x6 & mask5
vpand %ymm7, %ymm5, %ymm7  # qhasm: v11 = x7 & mask5
vpsrlq $1, %ymm6, %ymm6  # qhasm: 4x v01 unsigned>>= 1
vpor %ymm12, %ymm15, %ymm12  # qhasm: x6 = v00 | v10
vpor %ymm6, %ymm7, %ymm6  # qhasm: x7 = v01 | v11

vmovupd %ymm9, 1280(%rdi)  # qhasm: mem256[ input_0 + 1280 ] = x0
vmovupd %ymm13, 1312(%rdi)  # qhasm: mem256[ input_0 + 1312 ] = x1
vmovupd %ymm14, 1344(%rdi)  # qhasm: mem256[ input_0 + 1344 ] = x2
vmovupd %ymm10, 1376(%rdi)  # qhasm: mem256[ input_0 + 1376 ] = x3
vmovupd %ymm11, 1408(%rdi)  # qhasm: mem256[ input_0 + 1408 ] = x4
vmovupd %ymm8, 1440(%rdi)  # qhasm: mem256[ input_0 + 1440 ] = x5
vmovupd %ymm12, 1472(%rdi)  # qhasm: mem256[ input_0 + 1472 ] = x6
vmovupd %ymm6, 1504(%rdi)  # qhasm: mem256[ input_0 + 1504 ] = x7
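
# Same three rounds, applied to the chunk at input_0 + 1536.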
<x4=reg256#12,1408(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1408(<input_0=%rdi) vmovupd % ymm11, 1408( % rdi) # qhasm: mem256[ input_0 + 1440 ] = x5 # asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi) vmovupd % ymm8, 1440( % rdi) # qhasm: mem256[ input_0 + 1472 ] = x6 # asm 1: vmovupd <x6=reg256#13,1472(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1472(<input_0=%rdi) vmovupd % ymm12, 1472( % rdi) # qhasm: mem256[ input_0 + 1504 ] = x7 # asm 1: vmovupd <x7=reg256#7,1504(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1504(<input_0=%rdi) vmovupd % ymm6, 1504( % rdi) # qhasm: x0 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1536(<input_0=%rdi),>x0=%ymm6 vmovupd 1536( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1568(<input_0=%rdi),>x1=%ymm7 vmovupd 1568( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1600(<input_0=%rdi),>x2=%ymm8 vmovupd 1600( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1632 ] # asm 1: vmovupd 1632(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1632(<input_0=%rdi),>x3=%ymm9 vmovupd 1632( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1664 ] # asm 1: vmovupd 1664(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1664(<input_0=%rdi),>x4=%ymm10 vmovupd 1664( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1696 ] # asm 1: vmovupd 1696(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1696(<input_0=%rdi),>x5=%ymm11 vmovupd 1696( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1728 ] # asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12 vmovupd 1728( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1760 ] # asm 1: vmovupd 1760(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1760(<input_0=%rdi),>x7=%ymm13 vmovupd 1760( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand 
vpand % ymm6, % ymm0, % ymm14 # qhasm: v00 = x0 & mask0
vpand % ymm10, % ymm0, % ymm15 # qhasm: v10 = x4 & mask0
vpsllq $4, % ymm15, % ymm15 # qhasm: 4x v10 <<= 4
vpand % ymm6, % ymm1, % ymm6 # qhasm: v01 = x0 & mask1
vpand % ymm10, % ymm1, % ymm10 # qhasm: v11 = x4 & mask1
vpsrlq $4, % ymm6, % ymm6 # qhasm: 4x v01 unsigned>>= 4
vpor % ymm14, % ymm15, % ymm14 # qhasm: x0 = v00 | v10
vpor % ymm6, % ymm10, % ymm6 # qhasm: x4 = v01 | v11

vpand % ymm7, % ymm0, % ymm10 # qhasm: v00 = x1 & mask0
vpand % ymm11, % ymm0, % ymm15 # qhasm: v10 = x5 & mask0
vpsllq $4, % ymm15, % ymm15 # qhasm: 4x v10 <<= 4
vpand % ymm7, % ymm1, % ymm7 # qhasm: v01 = x1 & mask1
vpand % ymm11, % ymm1, % ymm11 # qhasm: v11 = x5 & mask1
vpsrlq $4, % ymm7, % ymm7 # qhasm: 4x v01 unsigned>>= 4
vpor % ymm10, % ymm15, % ymm10 # qhasm: x1 = v00 | v10
vpor % ymm7, % ymm11, % ymm7 # qhasm: x5 = v01 | v11

vpand % ymm8, % ymm0, % ymm11 # qhasm: v00 = x2 & mask0
vpand % ymm12, % ymm0, % ymm15 # qhasm: v10 = x6 & mask0
vpsllq $4, % ymm15, % ymm15 # qhasm: 4x v10 <<= 4
vpand % ymm8, % ymm1, % ymm8 # qhasm: v01 = x2 & mask1
vpand % ymm12, % ymm1, % ymm12 # qhasm: v11 = x6 & mask1
vpsrlq $4, % ymm8, % ymm8 # qhasm: 4x v01 unsigned>>= 4
vpor % ymm11, % ymm15, % ymm11 # qhasm: x2 = v00 | v10
vpor % ymm8, % ymm12, % ymm8 # qhasm: x6 = v01 | v11

vpand % ymm9, % ymm0, % ymm12 # qhasm: v00 = x3 & mask0
vpand % ymm13, % ymm0, % ymm15 # qhasm: v10 = x7 & mask0
vpsllq $4, % ymm15, % ymm15 # qhasm: 4x v10 <<= 4
vpand % ymm9, % ymm1, % ymm9 # qhasm: v01 = x3 & mask1
vpand % ymm13, % ymm1, % ymm13 # qhasm: v11 = x7 & mask1
vpsrlq $4, % ymm9, % ymm9 # qhasm: 4x v01 unsigned>>= 4
vpor % ymm12, % ymm15, % ymm12 # qhasm: x3 = v00 | v10
vpor % ymm9, % ymm13, % ymm9 # qhasm: x7 = v01 | v11

vpand % ymm14, % ymm2, % ymm13 # qhasm: v00 = x0 & mask2
vpand % ymm11, % ymm2, % ymm15 # qhasm: v10 = x2 & mask2
vpsllq $2, % ymm15, % ymm15 # qhasm: 4x v10 <<= 2
vpand % ymm14, % ymm3, % ymm14 # qhasm: v01 = x0 & mask3
vpand % ymm11, % ymm3, % ymm11 # qhasm: v11 = x2 & mask3
vpsrlq $2, % ymm14, % ymm14 # qhasm: 4x v01 unsigned>>= 2
vpor % ymm13, % ymm15, % ymm13 # qhasm: x0 = v00 | v10
vpor % ymm14, % ymm11, % ymm11 # qhasm: x2 = v01 | v11

vpand % ymm10, % ymm2, % ymm14 # qhasm: v00 = x1 & mask2
vpand % ymm12, % ymm2, % ymm15 # qhasm: v10 = x3 & mask2
vpsllq $2, % ymm15, % ymm15 # qhasm: 4x v10 <<= 2
vpand % ymm10, % ymm3, % ymm10 # qhasm: v01 = x1 & mask3
vpand % ymm12, % ymm3, % ymm12 # qhasm: v11 = x3 & mask3
vpsrlq $2, % ymm10, % ymm10 # qhasm: 4x v01 unsigned>>= 2
vpor % ymm14, % ymm15, % ymm14 # qhasm: x1 = v00 | v10
vpor % ymm10, % ymm12, % ymm10 # qhasm: x3 = v01 | v11

vpand % ymm6, % ymm2, % ymm12 # qhasm: v00 = x4 & mask2
vpand % ymm8, % ymm2, % ymm15 # qhasm: v10 = x6 & mask2
vpsllq $2, % ymm15, % ymm15 # qhasm: 4x v10 <<= 2
vpand % ymm6, % ymm3, % ymm6 # qhasm: v01 = x4 & mask3
vpand % ymm8, % ymm3, % ymm8 # qhasm: v11 = x6 & mask3
vpsrlq $2, % ymm6, % ymm6 # qhasm: 4x v01 unsigned>>= 2
vpor % ymm12, % ymm15, % ymm12 # qhasm: x4 = v00 | v10
vpor % ymm6, % ymm8, % ymm6 # qhasm: x6 = v01 | v11

vpand % ymm7, % ymm2, % ymm8 # qhasm: v00 = x5 & mask2
vpand % ymm9, % ymm2, % ymm15 # qhasm: v10 = x7 & mask2
vpsllq $2, % ymm15, % ymm15 # qhasm: 4x v10 <<= 2
vpand % ymm7, % ymm3, % ymm7 # qhasm: v01 = x5 & mask3
vpand % ymm9, % ymm3, % ymm9 # qhasm: v11 = x7 & mask3
vpsrlq $2, % ymm7, % ymm7 # qhasm: 4x v01 unsigned>>= 2
vpor % ymm8, % ymm15, % ymm8 # qhasm: x5 = v00 | v10
vpor % ymm7, % ymm9, % ymm7 # qhasm: x7 = v01 | v11

vpand % ymm13, % ymm4, % ymm9 # qhasm: v00 = x0 & mask4
vpand % ymm14, % ymm4, % ymm15 # qhasm: v10 = x1 & mask4
vpsllq $1, % ymm15, % ymm15 # qhasm: 4x v10 <<= 1
vpand % ymm13, % ymm5, % ymm13 # qhasm: v01 = x0 & mask5
vpand % ymm14, % ymm5, % ymm14 # qhasm: v11 = x1 & mask5
vpsrlq $1, % ymm13, % ymm13 # qhasm: 4x v01 unsigned>>= 1
vpor % ymm9, % ymm15, % ymm9 # qhasm: x0 = v00 | v10
vpor % ymm13, % ymm14, % ymm13 # qhasm: x1 = v01 | v11

vpand % ymm11, % ymm4, % ymm14 # qhasm: v00 = x2 & mask4
vpand % ymm10, % ymm4, % ymm15 # qhasm: v10 = x3 & mask4
vpsllq $1, % ymm15, % ymm15 # qhasm: 4x v10 <<= 1
vpand % ymm11, % ymm5, % ymm11 # qhasm: v01 = x2 & mask5
vpand % ymm10, % ymm5, % ymm10 # qhasm: v11 = x3 & mask5
vpsrlq $1, % ymm11, % ymm11 # qhasm: 4x v01 unsigned>>= 1
vpor % ymm14, % ymm15, % ymm14 # qhasm: x2 = v00 | v10
vpor % ymm11, % ymm10, % ymm10 # qhasm: x3 = v01 | v11

vpand % ymm12, % ymm4, % ymm11 # qhasm: v00 = x4 & mask4
vpand % ymm8, % ymm4, % ymm15 # qhasm: v10 = x5 & mask4
vpsllq $1, % ymm15, % ymm15 # qhasm: 4x v10 <<= 1
vpand % ymm12, % ymm5, % ymm12 # qhasm: v01 = x4 & mask5
vpand % ymm8, % ymm5, % ymm8 # qhasm: v11 = x5 & mask5
vpsrlq $1, % ymm12, % ymm12 # qhasm: 4x v01 unsigned>>= 1
vpor % ymm11, % ymm15, % ymm11 # qhasm: x4 = v00 | v10
vpor % ymm12, % ymm8, % ymm8 # qhasm: x5 = v01 | v11

vpand % ymm6, % ymm4, % ymm12 # qhasm: v00 = x6 & mask4
vpand % ymm7, % ymm4, % ymm15 # qhasm: v10 = x7 & mask4
vpsllq $1, % ymm15, % ymm15 # qhasm: 4x v10 <<= 1
vpand % ymm6, % ymm5, % ymm6 # qhasm: v01 = x6 & mask5
vpand % ymm7, % ymm5, % ymm7 # qhasm: v11 = x7 & mask5
vpsrlq $1, % ymm6, % ymm6 # qhasm: 4x v01 unsigned>>= 1
vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v00 | v10
vpor % ymm6, % ymm7, % ymm6 # qhasm: x7 = v01 | v11
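# First 256-byte block finished: x0..x7 hold the bit-permuted result and are
# written back in place before the same is done for the block at 1792..2016.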
vmovupd % ymm9, 1536( % rdi) # qhasm: mem256[ input_0 + 1536 ] = x0
vmovupd % ymm13, 1568( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x1
vmovupd % ymm14, 1600( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x2
vmovupd % ymm10, 1632( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x3
vmovupd % ymm11, 1664( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x4
vmovupd % ymm8, 1696( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x5
vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6
vmovupd % ymm6, 1760( % rdi) # qhasm: mem256[ input_0 + 1760 ] = x7

vmovupd 1792( % rdi), % ymm6 # qhasm: x0 = mem256[ input_0 + 1792 ]
vmovupd 1824( % rdi), % ymm7 # qhasm: x1 = mem256[ input_0 + 1824 ]
vmovupd 1856( % rdi), % ymm8 # qhasm: x2 = mem256[ input_0 + 1856 ]
vmovupd 1888( % rdi), % ymm9 # qhasm: x3 = mem256[ input_0 + 1888 ]
vmovupd 1920( % rdi), % ymm10 # qhasm: x4 = mem256[ input_0 + 1920 ]
vmovupd 1952( % rdi), % ymm11 # qhasm: x5 = mem256[ input_0 + 1952 ]
vmovupd 1984( % rdi), % ymm12 # qhasm: x6 = mem256[ input_0 + 1984 ]
vmovupd 2016( % rdi), % ymm13 # qhasm: x7 = mem256[ input_0 + 2016 ]
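# Last block: the same three mask passes repeat. Because the masks die after
# their final use here, the generator reuses several mask registers
# (ymm0..ymm4) as destinations in the closing butterflies of each pass.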
vpand % ymm6, % ymm0, % ymm14 # qhasm: v00 = x0 & mask0
vpand % ymm10, % ymm0, % ymm15 # qhasm: v10 = x4 & mask0
vpsllq $4, % ymm15, % ymm15 # qhasm: 4x v10 <<= 4
vpand % ymm6, % ymm1, % ymm6 # qhasm: v01 = x0 & mask1
vpand % ymm10, % ymm1, % ymm10 # qhasm: v11 = x4 & mask1
vpsrlq $4, % ymm6, % ymm6 # qhasm: 4x v01 unsigned>>= 4
vpor % ymm14, % ymm15, % ymm14 # qhasm: x0 = v00 | v10
vpor % ymm6, % ymm10, % ymm6 # qhasm: x4 = v01 | v11

vpand % ymm7, % ymm0, % ymm10 # qhasm: v00 = x1 & mask0
vpand % ymm11, % ymm0, % ymm15 # qhasm: v10 = x5 & mask0
vpsllq $4, % ymm15, % ymm15 # qhasm: 4x v10 <<= 4
vpand % ymm7, % ymm1, % ymm7 # qhasm: v01 = x1 & mask1
vpand % ymm11, % ymm1, % ymm11 # qhasm: v11 = x5 & mask1
vpsrlq $4, % ymm7, % ymm7 # qhasm: 4x v01 unsigned>>= 4
vpor % ymm10, % ymm15, % ymm10 # qhasm: x1 = v00 | v10
vpor % ymm7, % ymm11, % ymm7 # qhasm: x5 = v01 | v11

vpand % ymm8, % ymm0, % ymm11 # qhasm: v00 = x2 & mask0
vpand % ymm12, % ymm0, % ymm15 # qhasm: v10 = x6 & mask0
vpsllq $4, % ymm15, % ymm15 # qhasm: 4x v10 <<= 4
vpand % ymm8, % ymm1, % ymm8 # qhasm: v01 = x2 & mask1
vpand % ymm12, % ymm1, % ymm12 # qhasm: v11 = x6 & mask1
vpsrlq $4, % ymm8, % ymm8 # qhasm: 4x v01 unsigned>>= 4
vpor % ymm11, % ymm15, % ymm11 # qhasm: x2 = v00 | v10
vpor % ymm8, % ymm12, % ymm8 # qhasm: x6 = v01 | v11

vpand % ymm9, % ymm0, % ymm12 # qhasm: v00 = x3 & mask0
vpand % ymm13, % ymm0, % ymm0 # qhasm: v10 = x7 & mask0
vpsllq $4, % ymm0, % ymm0 # qhasm: 4x v10 <<= 4
vpand % ymm9, % ymm1, % ymm9 # qhasm: v01 = x3 & mask1
vpand % ymm13, % ymm1, % ymm1 # qhasm: v11 = x7 & mask1
vpsrlq $4, % ymm9, % ymm9 # qhasm: 4x v01 unsigned>>= 4
vpor % ymm12, % ymm0, % ymm0 # qhasm: x3 = v00 | v10
vpor % ymm9, % ymm1, % ymm1 # qhasm: x7 = v01 | v11

vpand % ymm14, % ymm2, % ymm9 # qhasm: v00 = x0 & mask2
vpand % ymm11, % ymm2, % ymm12 # qhasm: v10 = x2 & mask2
vpsllq $2, % ymm12, % ymm12 # qhasm: 4x v10 <<= 2
vpand % ymm14, % ymm3, % ymm13 # qhasm: v01 = x0 & mask3
vpand % ymm11, % ymm3, % ymm11 # qhasm: v11 = x2 & mask3
vpsrlq $2, % ymm13, % ymm13 # qhasm: 4x v01 unsigned>>= 2
vpor % ymm9, % ymm12, % ymm9 # qhasm: x0 = v00 | v10
vpor % ymm13, % ymm11, % ymm11 # qhasm: x2 = v01 | v11

vpand % ymm10, % ymm2, % ymm12 # qhasm: v00 = x1 & mask2
vpand % ymm0, % ymm2, % ymm13 # qhasm: v10 = x3 & mask2
vpsllq $2, % ymm13, % ymm13 # qhasm: 4x v10 <<= 2
vpand % ymm10, % ymm3, % ymm10 # qhasm: v01 = x1 & mask3
vpand % ymm0, % ymm3, % ymm0 # qhasm: v11 = x3 & mask3
vpsrlq $2, % ymm10, % ymm10 # qhasm: 4x v01 unsigned>>= 2
vpor % ymm12, % ymm13, % ymm12 # qhasm: x1 = v00 | v10
vpor % ymm10, % ymm0, % ymm0 # qhasm: x3 = v01 | v11

vpand % ymm6, % ymm2, % ymm10 # qhasm: v00 = x4 & mask2
vpand % ymm8, % ymm2, % ymm13 # qhasm: v10 = x6 & mask2
vpsllq $2, % ymm13, % ymm13 # qhasm: 4x v10 <<= 2
vpand % ymm6, % ymm3, % ymm6 # qhasm: v01 = x4 & mask3
vpand % ymm8, % ymm3, % ymm8 # qhasm: v11 = x6 & mask3
vpsrlq $2, % ymm6, % ymm6 # qhasm: 4x v01 unsigned>>= 2
vpor % ymm10, % ymm13, % ymm10 # qhasm: x4 = v00 | v10
vpor % ymm6, % ymm8, % ymm6 # qhasm: x6 = v01 | v11

vpand % ymm7, % ymm2, % ymm8 # qhasm: v00 = x5 & mask2
vpand % ymm1, % ymm2, % ymm2 # qhasm: v10 = x7 & mask2
vpsllq $2, % ymm2, % ymm2 # qhasm: 4x v10 <<= 2
vpand % ymm7, % ymm3, % ymm7 # qhasm: v01 = x5 & mask3
vpand % ymm1, % ymm3, % ymm1 # qhasm: v11 = x7 & mask3
vpsrlq $2, % ymm7, % ymm7 # qhasm: 4x v01 unsigned>>= 2
vpor % ymm8, % ymm2, % ymm2 # qhasm: x5 = v00 | v10
vpor % ymm7, % ymm1, % ymm1 # qhasm: x7 = v01 | v11

vpand % ymm9, % ymm4, % ymm3 # qhasm: v00 = x0 & mask4
vpand % ymm12, % ymm4, % ymm7 # qhasm: v10 = x1 & mask4
vpsllq $1, % ymm7, % ymm7 # qhasm: 4x v10 <<= 1
vpand % ymm9, % ymm5, % ymm8 # qhasm: v01 = x0 & mask5
vpand % ymm12, % ymm5, % ymm9 # qhasm: v11 = x1 & mask5
vpsrlq $1, % ymm8, % ymm8 # qhasm: 4x v01 unsigned>>= 1
vpor % ymm3, % ymm7, % ymm3 # qhasm: x0 = v00 | v10
vpor % ymm8, % ymm9, % ymm7 # qhasm: x1 = v01 | v11

vpand % ymm11, % ymm4, % ymm8 # qhasm: v00 = x2 & mask4
vpand % ymm0, % ymm4, % ymm9 # qhasm: v10 = x3 & mask4
vpsllq $1, % ymm9, % ymm9 # qhasm: 4x v10 <<= 1
vpand % ymm11, % ymm5, % ymm11 # qhasm: v01 = x2 & mask5
vpand % ymm0, % ymm5, % ymm0 # qhasm: v11 = x3 & mask5
vpsrlq $1, % ymm11, % ymm11 # qhasm: 4x v01 unsigned>>= 1
vpor % ymm8, % ymm9, % ymm8 # qhasm: x2 = v00 | v10
vpor % ymm11, % ymm0, % ymm0 # qhasm: x3 = v01 | v11

vpand % ymm10, % ymm4, % ymm9 # qhasm: v00 = x4 & mask4
vpand % ymm2, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4
vpsllq $1, % ymm11, % ymm11 # qhasm: 4x v10 <<= 1
vpand % ymm10, % ymm5, % ymm10 # qhasm: v01 = x4 & mask5
vpand % ymm2, % ymm5, % ymm2 # qhasm: v11 = x5 & mask5
vpsrlq $1, % ymm10, % ymm10 # qhasm: 4x v01 unsigned>>= 1
vpor % ymm9, % ymm11, % ymm9 # qhasm: x4 = v00 | v10
vpor % ymm10, % ymm2, % ymm2 # qhasm: x5 = v01 | v11

vpand % ymm6, % ymm4, % ymm10 # qhasm: v00 = x6 & mask4
vpand % ymm1, % ymm4, % ymm4 # qhasm: v10 = x7 & mask4
vpsllq $1, % ymm4, % ymm4 # qhasm: 4x v10 <<= 1
vpand % ymm6, % ymm5, % ymm6 # qhasm: v01 = x6 & mask5
vpand % ymm1, % ymm5, % ymm1 # qhasm: v11 = x7 & mask5
vpsrlq $1, % ymm6, % ymm6 # qhasm: 4x v01 unsigned>>= 1
vpor % ymm10, % ymm4, % ymm4 # qhasm: x6 = v00 | v10
vpor % ymm6, % ymm1, % ymm1 # qhasm: x7 = v01 | v11
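# Write the permuted block back; the epilogue then undoes the 32-byte
# stack-alignment adjustment made in the prologue and returns.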
vmovupd % ymm3, 1792( % rdi) # qhasm: mem256[ input_0 + 1792 ] = x0
vmovupd % ymm7, 1824( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x1
vmovupd % ymm8, 1856( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x2
vmovupd % ymm0, 1888( % rdi) # qhasm: mem256[ input_0 + 1888 ] = x3
vmovupd % ymm9, 1920( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x4
vmovupd % ymm2, 1952( % rdi) # qhasm: mem256[ input_0 + 1952 ] = x5
vmovupd % ymm4, 1984( % rdi) # qhasm: mem256[ input_0 + 1984 ] = x6
vmovupd % ymm1, 2016( % rdi) # qhasm: mem256[ input_0 + 2016 ] = x7

# qhasm: return
add % r11, % rsp
ret
mktmansour/MKT-KSA-Geolocation-Security
76,935
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128f/avx2/vec256_ama_asm.S
#include "namespace.h" #define vec256_ama_asm CRYPTO_NAMESPACE(vec256_ama_asm) #define _vec256_ama_asm _CRYPTO_NAMESPACE(vec256_ama_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_ama_asm .p2align 5 .global _vec256_ama_asm .global vec256_ama_asm _vec256_ama_asm: vec256_ama_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>a12=reg256#2 # asm 2: vmovupd 384(<input_0=%rdi),>a12=%ymm1 vmovupd 384( % rdi), % ymm1 # qhasm: a12 = a12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<a12=reg256#2,>a12=reg256#2 # asm 2: vpxor 384(<input_1=%rsi),<a12=%ymm1,>a12=%ymm1 vpxor 384( % rsi), % ymm1, % ymm1 # qhasm: mem256[ input_0 + 384 ] = a12 # asm 1: vmovupd <a12=reg256#2,384(<input_0=int64#1) # asm 2: vmovupd <a12=%ymm1,384(<input_0=%rdi) vmovupd % ymm1, 384( % rdi) # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # 
vmovupd 352( % rdi), % ymm14 # qhasm: a11 = mem256[ input_0 + 352 ]
vpxor 352( % rsi), % ymm14, % ymm14 # qhasm: a11 = a11 ^ mem256[ input_1 + 352 ]
vmovupd % ymm14, 352( % rdi) # qhasm: mem256[ input_0 + 352 ] = a11
vpand % ymm14, % ymm0, % ymm15 # qhasm: r = a11 & b0
vpxor % ymm15, % ymm1, % ymm1 # qhasm: r11 ^= r
vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r = a11 & mem256[input_2 + 32]
vpxor % ymm15, % ymm2, % ymm2 # qhasm: r12 ^= r
vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r = a11 & mem256[input_2 + 64]
vpxor % ymm15, % ymm3, % ymm3 # qhasm: r13 ^= r
vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r = a11 & mem256[input_2 + 96]
vpxor % ymm15, % ymm4, % ymm4 # qhasm: r14 ^= r
vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r = a11 & mem256[input_2 + 128]
vpxor % ymm15, % ymm5, % ymm5 # qhasm: r15 ^= r
vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r = a11 & mem256[input_2 + 160]
vpxor % ymm15, % ymm6, % ymm6 # qhasm: r16 ^= r
vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r = a11 & mem256[input_2 + 192]
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r17 ^= r
vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r = a11 & mem256[input_2 + 224]
vpxor % ymm15, % ymm8, % ymm8 # qhasm: r18 ^= r
vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r = a11 & mem256[input_2 + 256]
vpxor % ymm15, % ymm9, % ymm9 # qhasm: r19 ^= r
vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r = a11 & mem256[input_2 + 288]
vpxor % ymm15, % ymm10, % ymm10 # qhasm: r20 ^= r
vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r = a11 & mem256[input_2 + 320]
vpxor % ymm15, % ymm11, % ymm11 # qhasm: r21 ^= r
vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r = a11 & mem256[input_2 + 352]
vpxor % ymm15, % ymm12, % ymm12 # qhasm: r22 ^= r
vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r = a11 & mem256[input_2 + 384]
vpxor % ymm14, % ymm13, % ymm13 # qhasm: r23 ^= r
vpxor % ymm13, % ymm4, % ymm4 # qhasm: r14 ^= r23
vpxor % ymm13, % ymm3, % ymm3 # qhasm: r13 ^= r23
vpxor % ymm13, % ymm1, % ymm1 # qhasm: r11 ^= r23
vmovapd % ymm13, % ymm13 # qhasm: r10 = r23
vmovupd 320( % rdi), % ymm14 # qhasm: a10 = mem256[ input_0 + 320 ]
vpxor 320( % rsi), % ymm14, % ymm14 # qhasm: a10 = a10 ^ mem256[ input_1 + 320 ]
vmovupd % ymm14, 320( % rdi) # qhasm: mem256[ input_0 + 320 ] = a10
vpand % ymm14, % ymm0, % ymm15 # qhasm: r = a10 & b0
vpxor % ymm15, % ymm13, % ymm13 # qhasm: r10 ^= r
vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r = a10 & mem256[input_2 + 32]
vpxor % ymm15, % ymm1, % ymm1 # qhasm: r11 ^= r
vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r = a10 & mem256[input_2 + 64]
vpxor % ymm15, % ymm2, % ymm2 # qhasm: r12 ^= r
vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r = a10 & mem256[input_2 + 96]
vpxor % ymm15, % ymm3, % ymm3 # qhasm: r13 ^= r
vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r = a10 & mem256[input_2 + 128]
vpxor % ymm15, % ymm4, % ymm4 # qhasm: r14 ^= r
vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r = a10 & mem256[input_2 + 160]
vpxor % ymm15, % ymm5, % ymm5 # qhasm: r15 ^= r
vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r = a10 & mem256[input_2 + 192]
vpxor % ymm15, % ymm6, % ymm6 # qhasm: r16 ^= r
vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r = a10 & mem256[input_2 + 224]
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r17 ^= r
vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r = a10 & mem256[input_2 + 256]
vpxor % ymm15, % ymm8, % ymm8 # qhasm: r18 ^= r
vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r = a10 & mem256[input_2 + 288]
vpxor % ymm15, % ymm9, % ymm9 # qhasm: r19 ^= r
vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r = a10 & mem256[input_2 + 320]
vpxor % ymm15, % ymm10, % ymm10 # qhasm: r20 ^= r
vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r = a10 & mem256[input_2 + 352]
vpxor % ymm15, % ymm11, % ymm11 # qhasm: r21 ^= r
vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r = a10 & mem256[input_2 + 384]
vpxor % ymm14, % ymm12, % ymm12 # qhasm: r22 ^= r
vpxor % ymm12, % ymm3, % ymm3 # qhasm: r13 ^= r22
vpxor % ymm12, % ymm2, % ymm2 # qhasm: r12 ^= r22
vpxor % ymm12, % ymm13, % ymm13 # qhasm: r10 ^= r22
vmovapd % ymm12, % ymm12 # qhasm: r9 = r22
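# Each remaining coefficient follows the same pattern: load a_i, xor in the
# second operand, store it back, thirteen vpand/vpxor pairs against b, then
# fold the top sum r_n into r_(n-9), r_(n-10), r_(n-12) and r_(n-13). a_i
# streams through ymm14 with ymm15 as scratch, while the thirteen live
# accumulators rotate through ymm1..ymm13.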
vmovupd 288( % rdi), % ymm14 # qhasm: a9 = mem256[ input_0 + 288 ]
vpxor 288( % rsi), % ymm14, % ymm14 # qhasm: a9 = a9 ^ mem256[ input_1 + 288 ]
vmovupd % ymm14, 288( % rdi) # qhasm: mem256[ input_0 + 288 ] = a9
vpand % ymm14, % ymm0, % ymm15 # qhasm: r = a9 & b0
vpxor % ymm15, % ymm12, % ymm12 # qhasm: r9 ^= r
vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r = a9 & mem256[input_2 + 32]
vpxor % ymm15, % ymm13, % ymm13 # qhasm: r10 ^= r
vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r = a9 & mem256[input_2 + 64]
vpxor % ymm15, % ymm1, % ymm1 # qhasm: r11 ^= r
vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r = a9 & mem256[input_2 + 96]
vpxor % ymm15, % ymm2, % ymm2 # qhasm: r12 ^= r
vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r = a9 & mem256[input_2 + 128]
vpxor % ymm15, % ymm3, % ymm3 # qhasm: r13 ^= r
vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r = a9 & mem256[input_2 + 160]
vpxor % ymm15, % ymm4, % ymm4 # qhasm: r14 ^= r
vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r = a9 & mem256[input_2 + 192]
vpxor % ymm15, % ymm5, % ymm5 # qhasm: r15 ^= r
vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r = a9 & mem256[input_2 + 224]
vpxor % ymm15, % ymm6, % ymm6 # qhasm: r16 ^= r
vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r = a9 & mem256[input_2 + 256]
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r17 ^= r
vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r = a9 & mem256[input_2 + 288]
vpxor % ymm15, % ymm8, % ymm8 # qhasm: r18 ^= r
vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r = a9 & mem256[input_2 + 320]
vpxor % ymm15, % ymm9, % ymm9 # qhasm: r19 ^= r
vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r = a9 & mem256[input_2 + 352]
vpxor % ymm15, % ymm10, % ymm10 # qhasm: r20 ^= r
vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r = a9 & mem256[input_2 + 384]
vpxor % ymm14, % ymm11, % ymm11 # qhasm: r21 ^= r
vpxor % ymm11, % ymm2, % ymm2 # qhasm: r12 ^= r21
vpxor % ymm11, % ymm1, % ymm1 # qhasm: r11 ^= r21
vpxor % ymm11, % ymm12, % ymm12 # qhasm: r9 ^= r21
vmovapd % ymm11, % ymm11 # qhasm: r8 = r21
vmovupd 256( % rdi), % ymm14 # qhasm: a8 = mem256[ input_0 + 256 ]
vpxor 256( % rsi), % ymm14, % ymm14 # qhasm: a8 = a8 ^ mem256[ input_1 + 256 ]
vmovupd % ymm14, 256( % rdi) # qhasm: mem256[ input_0 + 256 ] = a8
vpand % ymm14, % ymm0, % ymm15 # qhasm: r = a8 & b0
vpxor % ymm15, % ymm11, % ymm11 # qhasm: r8 ^= r
vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r = a8 & mem256[input_2 + 32]
vpxor % ymm15, % ymm12, % ymm12 # qhasm: r9 ^= r
vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r = a8 & mem256[input_2 + 64]
vpxor % ymm15, % ymm13, % ymm13 # qhasm: r10 ^= r
vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r = a8 & mem256[input_2 + 96]
vpxor % ymm15, % ymm1, % ymm1 # qhasm: r11 ^= r
vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r = a8 & mem256[input_2 + 128]
vpxor % ymm15, % ymm2, % ymm2 # qhasm: r12 ^= r
vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r = a8 & mem256[input_2 + 160]
vpxor % ymm15, % ymm3, % ymm3 # qhasm: r13 ^= r
vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r = a8 & mem256[input_2 + 192]
vpxor % ymm15, % ymm4, % ymm4 # qhasm: r14 ^= r
vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r = a8 & mem256[input_2 + 224]
vpxor % ymm15, % ymm5, % ymm5 # qhasm: r15 ^= r
vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r = a8 & mem256[input_2 + 256]
vpxor % ymm15, % ymm6, % ymm6 # qhasm: r16 ^= r
vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r = a8 & mem256[input_2 + 288]
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r17 ^= r
vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r = a8 & mem256[input_2 + 320]
vpxor % ymm15, % ymm8, % ymm8 # qhasm: r18 ^= r
vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r = a8 & mem256[input_2 + 352]
vpxor % ymm15, % ymm9, % ymm9 # qhasm: r19 ^= r
vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r = a8 & mem256[input_2 + 384]
vpxor % ymm14, % ymm10, % ymm10 # qhasm: r20 ^= r
vpxor % ymm10, % ymm1, % ymm1 # qhasm: r11 ^= r20
vpxor % ymm10, % ymm13, % ymm13 # qhasm: r10 ^= r20
vpxor % ymm10, % ymm11, % ymm11 # qhasm: r8 ^= r20
vmovapd % ymm10, % ymm10 # qhasm: r7 = r20
vmovupd 224( % rdi), % ymm14 # qhasm: a7 = mem256[ input_0 + 224 ]
vpxor 224( % rsi), % ymm14, % ymm14 # qhasm: a7 = a7 ^ mem256[ input_1 + 224 ]
vmovupd % ymm14, 224( % rdi) # qhasm: mem256[ input_0 + 224 ] = a7
vpand % ymm14, % ymm0, % ymm15 # qhasm: r = a7 & b0
vpxor % ymm15, % ymm10, % ymm10 # qhasm: r7 ^= r
vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r = a7 & mem256[input_2 + 32]
vpxor % ymm15, % ymm11, % ymm11 # qhasm: r8 ^= r
vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r = a7 & mem256[input_2 + 64]
vpxor % ymm15, % ymm12, % ymm12 # qhasm: r9 ^= r
vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r = a7 & mem256[input_2 + 96]
vpxor % ymm15, % ymm13, % ymm13 # qhasm: r10 ^= r
vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r = a7 & mem256[input_2 + 128]
vpxor % ymm15, % ymm1, % ymm1 # qhasm: r11 ^= r
vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r = a7 & mem256[input_2 + 160]
vpxor % ymm15, % ymm2, % ymm2 # qhasm: r12 ^= r
vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r = a7 & mem256[input_2 + 192]
vpxor % ymm15, % ymm3, % ymm3 # qhasm: r13 ^= r
vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r = a7 & mem256[input_2 + 224]
vpxor % ymm15, % ymm4, % ymm4 # qhasm: r14 ^= r
vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r = a7 & mem256[input_2 + 256]
vpxor % ymm15, % ymm5, % ymm5 # qhasm: r15 ^= r
vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r = a7 & mem256[input_2 + 288]
vpxor % ymm15, % ymm6, % ymm6 # qhasm: r16 ^= r
# qhasm: r = a7 & mem256[input_2 + 320]
vpand 320( %
rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>a6=reg256#15 # asm 2: vmovupd 192(<input_0=%rdi),>a6=%ymm14 vmovupd 192( % rdi), % ymm14 # qhasm: a6 = a6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<a6=reg256#15,>a6=reg256#15 # asm 2: vpxor 192(<input_1=%rsi),<a6=%ymm14,>a6=%ymm14 vpxor 192( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 192 ] = a6 # asm 1: vmovupd <a6=reg256#15,192(<input_0=int64#1) # asm 2: vmovupd <a6=%ymm14,192(<input_0=%rdi) vmovupd % ymm14, 192( % rdi) # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: 
vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_0 + 160 ] # asm 1: vmovupd 
160(<input_0=int64#1),>a5=reg256#15 # asm 2: vmovupd 160(<input_0=%rdi),>a5=%ymm14 vmovupd 160( % rdi), % ymm14 # qhasm: a5 = a5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<a5=reg256#15,>a5=reg256#15 # asm 2: vpxor 160(<input_1=%rsi),<a5=%ymm14,>a5=%ymm14 vpxor 160( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 160 ] = a5 # asm 1: vmovupd <a5=reg256#15,160(<input_0=int64#1) # asm 2: vmovupd <a5=%ymm14,160(<input_0=%rdi) vmovupd % ymm14, 160( % rdi) # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = 
a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>a4=reg256#15 # asm 2: vmovupd 128(<input_0=%rdi),>a4=%ymm14 vmovupd 128( % rdi), % ymm14 # qhasm: a4 = a4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<a4=reg256#15,>a4=reg256#15 # asm 2: vpxor 128(<input_1=%rsi),<a4=%ymm14,>a4=%ymm14 vpxor 128( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 128 ] = a4 # asm 1: vmovupd <a4=reg256#15,128(<input_0=int64#1) # asm 2: vmovupd <a4=%ymm14,128(<input_0=%rdi) vmovupd % ymm14, 128( % rdi) # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % 
ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>a3=reg256#15 # asm 2: vmovupd 96(<input_0=%rdi),>a3=%ymm14 vmovupd 96( % rdi), % ymm14 # qhasm: a3 = a3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<a3=reg256#15,>a3=reg256#15 # asm 2: vpxor 96(<input_1=%rsi),<a3=%ymm14,>a3=%ymm14 vpxor 96( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 96 ] = a3 # asm 1: vmovupd <a3=reg256#15,96(<input_0=int64#1) # asm 2: vmovupd <a3=%ymm14,96(<input_0=%rdi) vmovupd % ymm14, 96( % rdi) # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor 
<r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>a2=reg256#15 # asm 2: vmovupd 64(<input_0=%rdi),>a2=%ymm14 vmovupd 64( % rdi), % ymm14 # qhasm: a2 = a2 ^ mem256[ input_1 + 64 ] # asm 1: vpxor 64(<input_1=int64#2),<a2=reg256#15,>a2=reg256#15 # asm 2: vpxor 64(<input_1=%rsi),<a2=%ymm14,>a2=%ymm14 vpxor 64( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 64 ] = a2 # asm 1: vmovupd <a2=reg256#15,64(<input_0=int64#1) # asm 2: vmovupd <a2=%ymm14,64(<input_0=%rdi) vmovupd % ymm14, 64( % rdi) # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 
vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 
384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>a1=reg256#15 # asm 2: vmovupd 32(<input_0=%rdi),>a1=%ymm14 vmovupd 32( % rdi), % ymm14 # qhasm: a1 = a1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<a1=reg256#15,>a1=reg256#15 # asm 2: vpxor 32(<input_1=%rsi),<a1=%ymm14,>a1=%ymm14 vpxor 32( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 32 ] = a1 # asm 1: vmovupd <a1=reg256#15,32(<input_0=int64#1) # asm 2: vmovupd <a1=%ymm14,32(<input_0=%rdi) vmovupd % ymm14, 32( % rdi) # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), 
% ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>a0=reg256#15 # asm 2: vmovupd 0(<input_0=%rdi),>a0=%ymm14 vmovupd 0( % rdi), % ymm14 # qhasm: a0 = a0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<a0=reg256#15,>a0=reg256#15 # asm 2: vpxor 0(<input_1=%rsi),<a0=%ymm14,>a0=%ymm14 vpxor 0( % rsi), % ymm14, % ymm14 # qhasm: mem256[ input_0 + 0 ] = a0 # asm 1: vmovupd <a0=reg256#15,0(<input_0=int64#1) # asm 2: vmovupd <a0=%ymm14,0(<input_0=%rdi) vmovupd % ymm14, 0( % rdi) # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # 
asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: 
vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: r12 = r12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#3,>r12=reg256#1 # asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm2,>r12=%ymm0 vpxor 384( % rsi), % ymm2, % ymm0 # qhasm: mem256[ input_1 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2) # asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi) vmovupd % ymm0, 384( % rsi) # qhasm: r11 = r11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#2,>r11=reg256#1 # asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm1,>r11=%ymm0 vpxor 352( % rsi), % ymm1, % ymm0 # qhasm: mem256[ input_1 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2) # asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi) vmovupd % ymm0, 352( % rsi) # qhasm: r10 = r10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#14,>r10=reg256#1 # asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm13,>r10=%ymm0 vpxor 320( % rsi), % ymm13, % ymm0 # qhasm: mem256[ input_1 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2) # asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi) vmovupd % ymm0, 320( % rsi) # qhasm: r9 = r9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#13,>r9=reg256#1 # asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm12,>r9=%ymm0 vpxor 288( % rsi), % ymm12, % ymm0 # qhasm: mem256[ input_1 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2) # asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi) vmovupd % ymm0, 288( % rsi) # qhasm: r8 = r8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#12,>r8=reg256#1 # asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm11,>r8=%ymm0 vpxor 256( % rsi), % ymm11, % ymm0 # qhasm: mem256[ input_1 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2) # asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi) vmovupd % ymm0, 256( % rsi) # qhasm: r7 = r7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 224(<input_1=int64#2),<r7=reg256#11,>r7=reg256#1 # asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm10,>r7=%ymm0 vpxor 224( % rsi), % ymm10, % ymm0 # qhasm: mem256[ input_1 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2) # asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi) vmovupd % ymm0, 224( % rsi) # qhasm: r6 = r6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#10,>r6=reg256#1 # asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm9,>r6=%ymm0 vpxor 192( % rsi), % ymm9, % ymm0 # qhasm: mem256[ input_1 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2) # asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi) vmovupd % ymm0, 192( % rsi) # qhasm: r5 = r5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#9,>r5=reg256#1 # asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm8,>r5=%ymm0 vpxor 160( % rsi), % ymm8, % ymm0 # qhasm: mem256[ input_1 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2) # asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi) vmovupd % ymm0, 160( % rsi) # qhasm: r4 = r4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 
128(<input_1=int64#2),<r4=reg256#8,>r4=reg256#1 # asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm7,>r4=%ymm0 vpxor 128( % rsi), % ymm7, % ymm0 # qhasm: mem256[ input_1 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2) # asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi) vmovupd % ymm0, 128( % rsi) # qhasm: r3 = r3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#7,>r3=reg256#1 # asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm6,>r3=%ymm0 vpxor 96( % rsi), % ymm6, % ymm0 # qhasm: mem256[ input_1 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2) # asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi) vmovupd % ymm0, 96( % rsi) # qhasm: r2 = r2 ^ mem256[ input_1 + 64 ] # asm 1: vpxor 64(<input_1=int64#2),<r2=reg256#6,>r2=reg256#1 # asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm5,>r2=%ymm0 vpxor 64( % rsi), % ymm5, % ymm0 # qhasm: mem256[ input_1 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2) # asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi) vmovupd % ymm0, 64( % rsi) # qhasm: r1 = r1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#5,>r1=reg256#1 # asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm4,>r1=%ymm0 vpxor 32( % rsi), % ymm4, % ymm0 # qhasm: mem256[ input_1 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2) # asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi) vmovupd % ymm0, 32( % rsi) # qhasm: r0 = r0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#4,>r0=reg256#1 # asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm3,>r0=%ymm0 vpxor 0( % rsi), % ymm3, % ymm0 # qhasm: mem256[ input_1 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2) # asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi) vmovupd % ymm0, 0( % rsi) # qhasm: return add % r11, % rsp ret
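That final `ret` closes the routine, and it is worth stating what the long unrolled body above computes: a bitsliced multiply-accumulate over GF(2)[x]. Each `vmovupd`/`vpxor`/`vmovupd` triple first replaces plane i of the first operand with a_i = in0_i ^ in1_i; each `vpand` against `b0` or `mem256[input_2 + 32*j]` forms the partial product a_i & b_j, and the `vpxor` that follows folds it into accumulator plane r_{i+j}. The interleaved folds such as r21 -> r12, r11, r9, r8 are the modular reduction: plane k collapses into planes k-9, k-10, k-12 and k-13, which is the relation x^13 = x^4 + x^3 + x + 1, i.e. multiplication in GF(2^13) on 13 bit-planes. The sketch below restates that pattern in plain C as a reading aid; it is written under stated assumptions (scalar 64-bit words standing in for the 256-bit ymm registers, an illustrative function name, and the reduction done in one pass rather than interleaved with the accumulation as the assembly does), not as the crate's API.

#include <stdint.h>

#define GFBITS 13   /* 13 bit-planes, matching the 0..384-byte operand strides */

/* acc ^= a * b, bitsliced over GF(2^13) = GF(2)[x]/(x^13 + x^4 + x^3 + x + 1).
   One uint64_t here plays the role of one 256-bit ymm word in the assembly. */
void vec_mul_acc_sketch(uint64_t acc[GFBITS],
                        const uint64_t a[GFBITS],
                        const uint64_t b[GFBITS])
{
    uint64_t prod[2 * GFBITS - 1] = {0};
    int i, j, k;

    /* schoolbook carryless multiply: every vpand is one a[i] & b[j],
       every vpxor that follows it is the fold into plane i+j */
    for (i = 0; i < GFBITS; i++)
        for (j = 0; j < GFBITS; j++)
            prod[i + j] ^= a[i] & b[j];

    /* reduction: plane k (k >= 13) folds into planes k-9, k-10, k-12, k-13,
       the r21 -> r12/r11/r9/r8 cascade visible in the assembly */
    for (k = 2 * GFBITS - 2; k >= GFBITS; k--) {
        prod[k - 9]  ^= prod[k];
        prod[k - 10] ^= prod[k];
        prod[k - 12] ^= prod[k];
        prod[k - 13] ^= prod[k];
    }

    for (i = 0; i < GFBITS; i++)
        acc[i] ^= prod[i];
}

The closing run of `vpxor`/`vmovupd` pairs before the epilogue XORs the reduced planes into the buffer at `input_1`, which the sketch models with the final `acc[i] ^= prod[i]` loop.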
mktmansour/MKT-KSA-Geolocation-Security
262,634
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128f/avx2/transpose_64x64_asm.S
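The record below is the AVX2 64x64 bit-matrix transpose for mceliece6688128f. After the namespace #defines, the qhasm source loads the six MASKn_0/MASKn_1 constant pairs with `movdqa` and runs masked shift-and-OR butterfly passes, starting at stride 32 (MASK5) and halving down to stride 1 (MASK0); each pass forms the four quantities v00/v10/v01/v11 named in the qhasm comments and recombines them with `vpor`. A minimal scalar C sketch of that butterfly follows, along the lines of the portable reference code that accompanies these .S files; the function name and the flat 64-row layout are illustrative assumptions, and the assembly additionally duplicates each 64-bit row across an xmm register with `movddup` and walks the rows at a 64-byte stride.

#include <stdint.h>

/* Transpose a 64x64 bit matrix held as 64 row words.  Pass d pairs rows
   j and j+s (s = 2^d) and swaps the off-diagonal s x s sub-blocks using
   the MASKd_0 (low-half) / MASKd_1 (high-half) constant pair. */
void transpose_64x64_sketch(uint64_t out[64], const uint64_t in[64])
{
    static const uint64_t mask[6][2] = {
        {0x5555555555555555ULL, 0xAAAAAAAAAAAAAAAAULL},
        {0x3333333333333333ULL, 0xCCCCCCCCCCCCCCCCULL},
        {0x0F0F0F0F0F0F0F0FULL, 0xF0F0F0F0F0F0F0F0ULL},
        {0x00FF00FF00FF00FFULL, 0xFF00FF00FF00FF00ULL},
        {0x0000FFFF0000FFFFULL, 0xFFFF0000FFFF0000ULL},
        {0x00000000FFFFFFFFULL, 0xFFFFFFFF00000000ULL},
    };
    int i, j, d, s;
    uint64_t x, y;

    for (i = 0; i < 64; i++)
        out[i] = in[i];

    for (d = 5; d >= 0; d--) {          /* the assembly also starts at MASK5 */
        s = 1 << d;
        for (i = 0; i < 64; i += 2 * s)
            for (j = i; j < i + s; j++) {
                /* v00 = kept low half, v10 = partner's low half shifted up,
                   v01 = own high half shifted down, v11 = kept high half  */
                x = (out[j] & mask[d][0]) | ((out[j + s] & mask[d][0]) << s);
                y = ((out[j] & mask[d][1]) >> s) | (out[j + s] & mask[d][1]);
                out[j]     = x;
                out[j + s] = y;
            }
    }
}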
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x64_asm CRYPTO_NAMESPACE(transpose_64x64_asm) #define _transpose_64x64_asm _CRYPTO_NAMESPACE(transpose_64x64_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 r0 # qhasm: reg128 r1 # qhasm: reg128 r2 # qhasm: reg128 r3 # qhasm: reg128 r4 # qhasm: reg128 r5 # qhasm: reg128 r6 # qhasm: reg128 r7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: int64 buf # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x64_asm .p2align 5 .global _transpose_64x64_asm .global transpose_64x64_asm _transpose_64x64_asm: transpose_64x64_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 64 ] x2 # asm 1: movddup 64(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 64(<input_0=%rdi),>r1=%xmm7 movddup 64( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 128 ] x2 # asm 1: movddup 
128(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 128(<input_0=%rdi),>r2=%xmm8 movddup 128( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 192 ] x2 # asm 1: movddup 192(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 192(<input_0=%rdi),>r3=%xmm9 movddup 192( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 256 ] x2 # asm 1: movddup 256(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 256(<input_0=%rdi),>r4=%xmm10 movddup 256( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 320 ] x2 # asm 1: movddup 320(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 320(<input_0=%rdi),>r5=%xmm11 movddup 320( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 384 ] x2 # asm 1: movddup 384(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 384(<input_0=%rdi),>r6=%xmm12 movddup 384( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 448 ] x2 # asm 1: movddup 448(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 448(<input_0=%rdi),>r7=%xmm13 movddup 448( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # 
qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 0 ] = buf # asm 1: movq <buf=int64#2,0(<input_0=int64#1) # asm 2: movq <buf=%rsi,0(<input_0=%rdi) movq % rsi, 0( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 64 ] = buf # asm 1: movq <buf=int64#2,64(<input_0=int64#1) # asm 2: movq <buf=%rsi,64(<input_0=%rdi) movq % rsi, 64( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 128 ] = buf # asm 1: movq <buf=int64#2,128(<input_0=int64#1) # asm 2: movq <buf=%rsi,128(<input_0=%rdi) movq % rsi, 128( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 192 ] = buf # asm 1: movq <buf=int64#2,192(<input_0=int64#1) # asm 2: movq <buf=%rsi,192(<input_0=%rdi) movq % rsi, 192( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 256 ] = buf # asm 1: movq <buf=int64#2,256(<input_0=int64#1) # asm 2: movq <buf=%rsi,256(<input_0=%rdi) movq % rsi, 256( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 320 ] = buf # asm 1: movq <buf=int64#2,320(<input_0=int64#1) # asm 2: movq <buf=%rsi,320(<input_0=%rdi) movq % rsi, 320( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi 
pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 384 ] = buf # asm 1: movq <buf=int64#2,384(<input_0=int64#1) # asm 2: movq <buf=%rsi,384(<input_0=%rdi) movq % rsi, 384( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 448 ] = buf # asm 1: movq <buf=int64#2,448(<input_0=int64#1) # asm 2: movq <buf=%rsi,448(<input_0=%rdi) movq % rsi, 448( % rdi) # qhasm: r0 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 8(<input_0=%rdi),>r0=%xmm6 movddup 8( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 72 ] x2 # asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7 movddup 72( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 136 ] x2 # asm 1: movddup 136(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 136(<input_0=%rdi),>r2=%xmm8 movddup 136( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 200 ] x2 # asm 1: movddup 200(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 200(<input_0=%rdi),>r3=%xmm9 movddup 200( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 264 ] x2 # asm 1: movddup 264(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 264(<input_0=%rdi),>r4=%xmm10 movddup 264( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 328 ] x2 # asm 1: movddup 328(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 328(<input_0=%rdi),>r5=%xmm11 movddup 328( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 392 ] x2 # asm 1: movddup 392(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 392(<input_0=%rdi),>r6=%xmm12 movddup 392( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 456 ] x2 # asm 1: movddup 456(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 456(<input_0=%rdi),>r7=%xmm13 movddup 456( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 
= v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand 
<mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 
vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 8 ] = buf # asm 1: movq <buf=int64#2,8(<input_0=int64#1) # asm 2: movq <buf=%rsi,8(<input_0=%rdi) movq % rsi, 8( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 72 ] = buf # asm 1: movq <buf=int64#2,72(<input_0=int64#1) # asm 2: movq <buf=%rsi,72(<input_0=%rdi) movq % rsi, 72( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 136 ] = buf # asm 1: movq <buf=int64#2,136(<input_0=int64#1) # asm 2: movq <buf=%rsi,136(<input_0=%rdi) movq % rsi, 136( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: 
mem64[ input_0 + 200 ] = buf # asm 1: movq <buf=int64#2,200(<input_0=int64#1) # asm 2: movq <buf=%rsi,200(<input_0=%rdi) movq % rsi, 200( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 264 ] = buf # asm 1: movq <buf=int64#2,264(<input_0=int64#1) # asm 2: movq <buf=%rsi,264(<input_0=%rdi) movq % rsi, 264( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 328 ] = buf # asm 1: movq <buf=int64#2,328(<input_0=int64#1) # asm 2: movq <buf=%rsi,328(<input_0=%rdi) movq % rsi, 328( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 392 ] = buf # asm 1: movq <buf=int64#2,392(<input_0=int64#1) # asm 2: movq <buf=%rsi,392(<input_0=%rdi) movq % rsi, 392( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 456 ] = buf # asm 1: movq <buf=int64#2,456(<input_0=int64#1) # asm 2: movq <buf=%rsi,456(<input_0=%rdi) movq % rsi, 456( % rdi) # qhasm: r0 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 16(<input_0=%rdi),>r0=%xmm6 movddup 16( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 80 ] x2 # asm 1: movddup 80(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 80(<input_0=%rdi),>r1=%xmm7 movddup 80( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 144 ] x2 # asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8 movddup 144( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 208 ] x2 # asm 1: movddup 208(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 208(<input_0=%rdi),>r3=%xmm9 movddup 208( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 272 ] x2 # asm 1: movddup 272(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 272(<input_0=%rdi),>r4=%xmm10 movddup 272( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 336 ] x2 # asm 1: movddup 336(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 336(<input_0=%rdi),>r5=%xmm11 movddup 336( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 400 ] x2 # asm 1: movddup 400(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 400(<input_0=%rdi),>r6=%xmm12 movddup 400( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 464 ] x2 # asm 1: movddup 464(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 464(<input_0=%rdi),>r7=%xmm13 movddup 464( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor 
<v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 
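#
# ----------------------------------------------------------------------
# The recurring vpand / vpsll* / vpsrl* / vpor pattern in this region is
# one "butterfly" round of the 64x64 bit-matrix transpose: each round
# pairs two 64-bit rows and exchanges a block of bits between them with
# mask / shift / OR.  Successive rounds pair r_i with r_{i+4} (d = 32,
# MASK5_*), r_{i+2} (d = 16, MASK4_*) and r_{i+1} (d = 8, MASK3_*), on
# rows loaded 64 bytes apart, and each 64-bit result is written back with
# pextrq/movq before the next column group is processed.  A minimal C
# sketch of one such step (illustrative only; the helper name is made up
# and this is not the reference implementation):
#
#     /* Hypothetical helper, for illustration.  Rows a, b; block width
#        d = 32, 16 or 8; lo/hi are the matching low/high bit masks,
#        e.g. lo = 0x00000000FFFFFFFFULL, hi = ~lo for d = 32. */
#     static inline void swap_bits(uint64_t *a, uint64_t *b,
#                                  uint64_t lo, uint64_t hi, int d) {
#         uint64_t t0 = (*a & lo) | (*b << d);   /* v00 | v10 */
#         uint64_t t1 = (*a >> d) | (*b & hi);   /* v01 | v11 */
#         *a = t0;   /* low half of a kept, low half of b moved up   */
#         *b = t1;   /* high half of a moved down, high half of b kept */
#     }
# ----------------------------------------------------------------------
#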
# qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 
2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 16 ] = buf # 
asm 1: movq <buf=int64#2,16(<input_0=int64#1) # asm 2: movq <buf=%rsi,16(<input_0=%rdi) movq % rsi, 16( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 80 ] = buf # asm 1: movq <buf=int64#2,80(<input_0=int64#1) # asm 2: movq <buf=%rsi,80(<input_0=%rdi) movq % rsi, 80( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 144 ] = buf # asm 1: movq <buf=int64#2,144(<input_0=int64#1) # asm 2: movq <buf=%rsi,144(<input_0=%rdi) movq % rsi, 144( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 208 ] = buf # asm 1: movq <buf=int64#2,208(<input_0=int64#1) # asm 2: movq <buf=%rsi,208(<input_0=%rdi) movq % rsi, 208( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 272 ] = buf # asm 1: movq <buf=int64#2,272(<input_0=int64#1) # asm 2: movq <buf=%rsi,272(<input_0=%rdi) movq % rsi, 272( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 336 ] = buf # asm 1: movq <buf=int64#2,336(<input_0=int64#1) # asm 2: movq <buf=%rsi,336(<input_0=%rdi) movq % rsi, 336( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 400 ] = buf # asm 1: movq <buf=int64#2,400(<input_0=int64#1) # asm 2: movq <buf=%rsi,400(<input_0=%rdi) movq % rsi, 400( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 464 ] = buf # asm 1: movq <buf=int64#2,464(<input_0=int64#1) # asm 2: movq <buf=%rsi,464(<input_0=%rdi) movq % rsi, 464( % rdi) # qhasm: r0 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 24(<input_0=%rdi),>r0=%xmm6 movddup 24( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 88 ] x2 # asm 1: movddup 88(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 88(<input_0=%rdi),>r1=%xmm7 movddup 88( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 152 ] x2 # asm 1: movddup 152(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 152(<input_0=%rdi),>r2=%xmm8 movddup 152( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 216 ] x2 # asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9 movddup 216( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 280 ] x2 # asm 1: movddup 280(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 280(<input_0=%rdi),>r4=%xmm10 movddup 280( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 344 ] x2 # asm 1: movddup 344(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 344(<input_0=%rdi),>r5=%xmm11 movddup 344( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 408 ] x2 # asm 1: movddup 408(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 408(<input_0=%rdi),>r6=%xmm12 movddup 408( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 472 ] x2 # asm 1: movddup 472(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 472(<input_0=%rdi),>r7=%xmm13 movddup 472( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand 
<mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = r4 << 32 # asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = r0 unsigned>> 32 # asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = r5 << 32 # asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = r1 unsigned>> 32 # asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = r6 << 32 # asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = r2 unsigned>> 32 # asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = r7 << 32 # asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = r3 unsigned>> 32 # asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: r3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm12,>r3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = r0 & mask2 # asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = r2 << 16 # asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = r0 unsigned>> 16 # asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = r2 & mask3 # asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: r2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = r1 & mask2 # asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = r3 << 16 # asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = r1 unsigned>> 16 # asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = r3 & mask3 # asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = r4 & mask2 # asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = r6 << 16 # asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = r4 unsigned>> 16 # asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = r6 & mask3 # asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = r5 & mask2 # asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = r7 << 16 # asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = r5 unsigned>> 16 # asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # 
qhasm: v11 = r7 & mask3 # asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: r5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = r0 & mask4 # asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = r1 << 8 # asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = r0 unsigned>> 8 # asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = r1 & mask5 # asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: r1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = r3 << 8 # asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = r2 unsigned>> 8 # asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = r5 << 8 # asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = r4 unsigned>> 8 # asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = r7 << 8 # asm 1: vpsllw 
$8,<r7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = r6 unsigned>> 8 # asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: buf = r0[0] # asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2 # asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi pextrq $0x0, % xmm9, % rsi # qhasm: mem64[ input_0 + 24 ] = buf # asm 1: movq <buf=int64#2,24(<input_0=int64#1) # asm 2: movq <buf=%rsi,24(<input_0=%rdi) movq % rsi, 24( % rdi) # qhasm: buf = r1[0] # asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2 # asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi pextrq $0x0, % xmm13, % rsi # qhasm: mem64[ input_0 + 88 ] = buf # asm 1: movq <buf=int64#2,88(<input_0=int64#1) # asm 2: movq <buf=%rsi,88(<input_0=%rdi) movq % rsi, 88( % rdi) # qhasm: buf = r2[0] # asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2 # asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi pextrq $0x0, % xmm14, % rsi # qhasm: mem64[ input_0 + 152 ] = buf # asm 1: movq <buf=int64#2,152(<input_0=int64#1) # asm 2: movq <buf=%rsi,152(<input_0=%rdi) movq % rsi, 152( % rdi) # qhasm: buf = r3[0] # asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2 # asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi pextrq $0x0, % xmm10, % rsi # qhasm: mem64[ input_0 + 216 ] = buf # asm 1: movq <buf=int64#2,216(<input_0=int64#1) # asm 2: movq <buf=%rsi,216(<input_0=%rdi) movq % rsi, 216( % rdi) # qhasm: buf = r4[0] # asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2 # asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi pextrq $0x0, % xmm11, % rsi # qhasm: mem64[ input_0 + 280 ] = buf # asm 1: movq <buf=int64#2,280(<input_0=int64#1) # asm 2: movq <buf=%rsi,280(<input_0=%rdi) movq % rsi, 280( % rdi) # qhasm: buf = r5[0] # asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2 # asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi pextrq $0x0, % xmm8, % rsi # qhasm: mem64[ input_0 + 344 ] = buf # asm 1: movq <buf=int64#2,344(<input_0=int64#1) # asm 2: movq <buf=%rsi,344(<input_0=%rdi) movq % rsi, 344( % rdi) # qhasm: buf = r6[0] # asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2 # asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi pextrq $0x0, % xmm12, % rsi # qhasm: mem64[ input_0 + 408 ] = buf # asm 1: movq <buf=int64#2,408(<input_0=int64#1) # asm 2: movq <buf=%rsi,408(<input_0=%rdi) movq % rsi, 408( % rdi) # qhasm: buf = r7[0] # asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2 # asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi pextrq $0x0, % xmm6, % rsi # qhasm: mem64[ input_0 + 472 ] = buf # asm 1: movq <buf=int64#2,472(<input_0=int64#1) # asm 2: movq <buf=%rsi,472(<input_0=%rdi) movq % rsi, 472( % rdi) # qhasm: r0 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 32(<input_0=%rdi),>r0=%xmm6 movddup 32( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 96 ] x2 # asm 1: movddup 96(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 96(<input_0=%rdi),>r1=%xmm7 movddup 96( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 160 ] x2 # asm 1: movddup 160(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 160(<input_0=%rdi),>r2=%xmm8 movddup 160( % rdi), % xmm8 # qhasm: r3 = 
# qhasm: r0 = mem64[ input_0 + 32 ] x2
movddup 32( % rdi), % xmm6

# qhasm: r1 = mem64[ input_0 + 96 ] x2
movddup 96( % rdi), % xmm7

# qhasm: r2 = mem64[ input_0 + 160 ] x2
movddup 160( % rdi), % xmm8

# qhasm: r3 = mem64[ input_0 + 224 ] x2
movddup 224( % rdi), % xmm9

# qhasm: r4 = mem64[ input_0 + 288 ] x2
movddup 288( % rdi), % xmm10

# qhasm: r5 = mem64[ input_0 + 352 ] x2
movddup 352( % rdi), % xmm11

# qhasm: r6 = mem64[ input_0 + 416 ] x2
movddup 416( % rdi), % xmm12

# qhasm: r7 = mem64[ input_0 + 480 ] x2
movddup 480( % rdi), % xmm13

# qhasm: v00 = r0 & mask0
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = r4 << 32
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = r4 & mask1
vpand % xmm1, % xmm10, % xmm10

# qhasm: r0 = v00 | v10
vpor % xmm15, % xmm14, % xmm14

# qhasm: r4 = v01 | v11
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = r1 & mask0
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = r5 << 32
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = r5 & mask1
vpand % xmm1, % xmm11, % xmm11

# qhasm: r1 = v00 | v10
vpor % xmm15, % xmm10, % xmm10

# qhasm: r5 = v01 | v11
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = r2 & mask0
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = r6 << 32
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = r6 & mask1
vpand % xmm1, % xmm12, % xmm12

# qhasm: r2 = v00 | v10
vpor % xmm15, % xmm11, % xmm11

# qhasm: r6 = v01 | v11
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = r3 & mask0
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = r7 << 32
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = r7 & mask1
vpand % xmm1, % xmm13, % xmm13

# qhasm: r3 = v00 | v10
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = r0 & mask2
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = r2 << 16
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = r2 & mask3
vpand % xmm3, % xmm11, % xmm11

# qhasm: r0 = v00 | v10
vpor % xmm15, % xmm13, % xmm13

# qhasm: r2 = v01 | v11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = r1 & mask2
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = r3 << 16
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = r3 & mask3
vpand % xmm3, % xmm12, % xmm12

# qhasm: r1 = v00 | v10
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = r4 & mask2
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = r6 << 16
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = r6 & mask3
vpand % xmm3, % xmm8, % xmm8

# qhasm: r4 = v00 | v10
vpor % xmm15, % xmm12, % xmm12

# qhasm: r6 = v01 | v11
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = r5 & mask2
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = r7 << 16
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = r7 & mask3
vpand % xmm3, % xmm9, % xmm9

# qhasm: r5 = v00 | v10
vpor % xmm15, % xmm8, % xmm8

# qhasm: r7 = v01 | v11
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = r0 & mask4
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = r1 << 8
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = r1 & mask5
vpand % xmm5, % xmm14, % xmm14

# qhasm: r0 = v00 | v10
vpor % xmm15, % xmm9, % xmm9

# qhasm: r1 = v01 | v11
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = r2 & mask4
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = r3 << 8
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = r3 & mask5
vpand % xmm5, % xmm10, % xmm10

# qhasm: r2 = v00 | v10
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = r4 & mask4
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = r5 << 8
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = r5 & mask5
vpand % xmm5, % xmm8, % xmm8

# qhasm: r4 = v00 | v10
vpor % xmm15, % xmm11, % xmm11

# qhasm: r5 = v01 | v11
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = r6 & mask4
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = r7 << 8
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = r7 & mask5
vpand % xmm5, % xmm7, % xmm7

# qhasm: r6 = v00 | v10
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
vpor % xmm7, % xmm6, % xmm6

# qhasm: buf = r0[0]
pextrq $0x0, % xmm9, % rsi

# qhasm: mem64[ input_0 + 32 ] = buf
movq % rsi, 32( % rdi)

# qhasm: buf = r1[0]
pextrq $0x0, % xmm13, % rsi

# qhasm: mem64[ input_0 + 96 ] = buf
movq % rsi, 96( % rdi)

# qhasm: buf = r2[0]
pextrq $0x0, % xmm14, % rsi

# qhasm: mem64[ input_0 + 160 ] = buf
movq % rsi, 160( % rdi)

# qhasm: buf = r3[0]
pextrq $0x0, % xmm10, % rsi

# qhasm: mem64[ input_0 + 224 ] = buf
movq % rsi, 224( % rdi)

# qhasm: buf = r4[0]
pextrq $0x0, % xmm11, % rsi

# qhasm: mem64[ input_0 + 288 ] = buf
movq % rsi, 288( % rdi)

# qhasm: buf = r5[0]
pextrq $0x0, % xmm8, % rsi

# qhasm: mem64[ input_0 + 352 ] = buf
movq % rsi, 352( % rdi)

# qhasm: buf = r6[0]
pextrq $0x0, % xmm12, % rsi

# qhasm: mem64[ input_0 + 416 ] = buf
movq % rsi, 416( % rdi)
# qhasm: buf = r7[0]
pextrq $0x0, % xmm6, % rsi

# qhasm: mem64[ input_0 + 480 ] = buf
movq % rsi, 480( % rdi)

# qhasm: r0 = mem64[ input_0 + 40 ] x2
movddup 40( % rdi), % xmm6

# qhasm: r1 = mem64[ input_0 + 104 ] x2
movddup 104( % rdi), % xmm7

# qhasm: r2 = mem64[ input_0 + 168 ] x2
movddup 168( % rdi), % xmm8

# qhasm: r3 = mem64[ input_0 + 232 ] x2
movddup 232( % rdi), % xmm9

# qhasm: r4 = mem64[ input_0 + 296 ] x2
movddup 296( % rdi), % xmm10

# qhasm: r5 = mem64[ input_0 + 360 ] x2
movddup 360( % rdi), % xmm11

# qhasm: r6 = mem64[ input_0 + 424 ] x2
movddup 424( % rdi), % xmm12

# qhasm: r7 = mem64[ input_0 + 488 ] x2
movddup 488( % rdi), % xmm13

# qhasm: v00 = r0 & mask0
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = r4 << 32
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = r4 & mask1
vpand % xmm1, % xmm10, % xmm10

# qhasm: r0 = v00 | v10
vpor % xmm15, % xmm14, % xmm14

# qhasm: r4 = v01 | v11
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = r1 & mask0
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = r5 << 32
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = r5 & mask1
vpand % xmm1, % xmm11, % xmm11

# qhasm: r1 = v00 | v10
vpor % xmm15, % xmm10, % xmm10

# qhasm: r5 = v01 | v11
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = r2 & mask0
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = r6 << 32
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = r6 & mask1
vpand % xmm1, % xmm12, % xmm12

# qhasm: r2 = v00 | v10
vpor % xmm15, % xmm11, % xmm11

# qhasm: r6 = v01 | v11
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = r3 & mask0
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = r7 << 32
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = r7 & mask1
vpand % xmm1, % xmm13, % xmm13

# qhasm: r3 = v00 | v10
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = r0 & mask2
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = r2 << 16
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = r2 & mask3
vpand % xmm3, % xmm11, % xmm11

# qhasm: r0 = v00 | v10
vpor % xmm15, % xmm13, % xmm13

# qhasm: r2 = v01 | v11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = r1 & mask2
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = r3 << 16
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = r3 & mask3
vpand % xmm3, % xmm12, % xmm12

# qhasm: r1 = v00 | v10
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = r4 & mask2
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = r6 << 16
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = r6 & mask3
vpand % xmm3, % xmm8, % xmm8

# qhasm: r4 = v00 | v10
vpor % xmm15, % xmm12, % xmm12

# qhasm: r6 = v01 | v11
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = r5 & mask2
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = r7 << 16
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = r7 & mask3
vpand % xmm3, % xmm9, % xmm9

# qhasm: r5 = v00 | v10
vpor % xmm15, % xmm8, % xmm8

# qhasm: r7 = v01 | v11
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = r0 & mask4
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = r1 << 8
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = r1 & mask5
vpand % xmm5, % xmm14, % xmm14

# qhasm: r0 = v00 | v10
vpor % xmm15, % xmm9, % xmm9

# qhasm: r1 = v01 | v11
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = r2 & mask4
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = r3 << 8
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = r3 & mask5
vpand % xmm5, % xmm10, % xmm10

# qhasm: r2 = v00 | v10
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = r4 & mask4
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = r5 << 8
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = r5 & mask5
vpand % xmm5, % xmm8, % xmm8

# qhasm: r4 = v00 | v10
vpor % xmm15, % xmm11, % xmm11

# qhasm: r5 = v01 | v11
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = r6 & mask4
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = r7 << 8
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = r7 & mask5
vpand % xmm5, % xmm7, % xmm7

# qhasm: r6 = v00 | v10
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
vpor % xmm7, % xmm6, % xmm6

# qhasm: buf = r0[0]
pextrq $0x0, % xmm9, % rsi

# qhasm: mem64[ input_0 + 40 ] = buf
movq % rsi, 40( % rdi)

# qhasm: buf = r1[0]
pextrq $0x0, % xmm13, % rsi

# qhasm: mem64[ input_0 + 104 ] = buf
movq % rsi, 104( % rdi)

# qhasm: buf = r2[0]
pextrq $0x0, % xmm14, % rsi

# qhasm: mem64[ input_0 + 168 ] = buf
movq % rsi, 168( % rdi)

# qhasm: buf = r3[0]
pextrq $0x0, % xmm10, % rsi

# qhasm: mem64[ input_0 + 232 ] = buf
movq % rsi, 232( % rdi)
# qhasm: buf = r4[0]
pextrq $0x0, % xmm11, % rsi

# qhasm: mem64[ input_0 + 296 ] = buf
movq % rsi, 296( % rdi)

# qhasm: buf = r5[0]
pextrq $0x0, % xmm8, % rsi

# qhasm: mem64[ input_0 + 360 ] = buf
movq % rsi, 360( % rdi)

# qhasm: buf = r6[0]
pextrq $0x0, % xmm12, % rsi

# qhasm: mem64[ input_0 + 424 ] = buf
movq % rsi, 424( % rdi)

# qhasm: buf = r7[0]
pextrq $0x0, % xmm6, % rsi

# qhasm: mem64[ input_0 + 488 ] = buf
movq % rsi, 488( % rdi)

# qhasm: r0 = mem64[ input_0 + 48 ] x2
movddup 48( % rdi), % xmm6

# qhasm: r1 = mem64[ input_0 + 112 ] x2
movddup 112( % rdi), % xmm7

# qhasm: r2 = mem64[ input_0 + 176 ] x2
movddup 176( % rdi), % xmm8

# qhasm: r3 = mem64[ input_0 + 240 ] x2
movddup 240( % rdi), % xmm9

# qhasm: r4 = mem64[ input_0 + 304 ] x2
movddup 304( % rdi), % xmm10

# qhasm: r5 = mem64[ input_0 + 368 ] x2
movddup 368( % rdi), % xmm11

# qhasm: r6 = mem64[ input_0 + 432 ] x2
movddup 432( % rdi), % xmm12

# qhasm: r7 = mem64[ input_0 + 496 ] x2
movddup 496( % rdi), % xmm13

# qhasm: v00 = r0 & mask0
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = r4 << 32
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = r4 & mask1
vpand % xmm1, % xmm10, % xmm10

# qhasm: r0 = v00 | v10
vpor % xmm15, % xmm14, % xmm14

# qhasm: r4 = v01 | v11
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = r1 & mask0
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = r5 << 32
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = r5 & mask1
vpand % xmm1, % xmm11, % xmm11

# qhasm: r1 = v00 | v10
vpor % xmm15, % xmm10, % xmm10

# qhasm: r5 = v01 | v11
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = r2 & mask0
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = r6 << 32
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = r6 & mask1
vpand % xmm1, % xmm12, % xmm12

# qhasm: r2 = v00 | v10
vpor % xmm15, % xmm11, % xmm11

# qhasm: r6 = v01 | v11
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = r3 & mask0
vpand % xmm0, % xmm9, % xmm12

# qhasm: 2x v10 = r7 << 32
vpsllq $32, % xmm13, % xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = r7 & mask1
vpand % xmm1, % xmm13, % xmm13

# qhasm: r3 = v00 | v10
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
vpor % xmm13, % xmm9, % xmm9

# qhasm: v00 = r0 & mask2
vpand % xmm2, % xmm14, % xmm13

# qhasm: 4x v10 = r2 << 16
vpslld $16, % xmm11, % xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
vpsrld $16, % xmm14, % xmm14

# qhasm: v11 = r2 & mask3
vpand % xmm3, % xmm11, % xmm11

# qhasm: r0 = v00 | v10
vpor % xmm15, % xmm13, % xmm13

# qhasm: r2 = v01 | v11
vpor % xmm11, % xmm14, % xmm11

# qhasm: v00 = r1 & mask2
vpand % xmm2, % xmm10, % xmm14

# qhasm: 4x v10 = r3 << 16
vpslld $16, % xmm12, % xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = r3 & mask3
vpand % xmm3, % xmm12, % xmm12

# qhasm: r1 = v00 | v10
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
vpor % xmm12, % xmm10, % xmm10

# qhasm: v00 = r4 & mask2
vpand % xmm2, % xmm6, % xmm12

# qhasm: 4x v10 = r6 << 16
vpslld $16, % xmm8, % xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = r6 & mask3
vpand % xmm3, % xmm8, % xmm8

# qhasm: r4 = v00 | v10
vpor % xmm15, % xmm12, % xmm12

# qhasm: r6 = v01 | v11
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = r5 & mask2
vpand % xmm2, % xmm7, % xmm8

# qhasm: 4x v10 = r7 << 16
vpslld $16, % xmm9, % xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = r7 & mask3
vpand % xmm3, % xmm9, % xmm9

# qhasm: r5 = v00 | v10
vpor % xmm15, % xmm8, % xmm8

# qhasm: r7 = v01 | v11
vpor % xmm9, % xmm7, % xmm7

# qhasm: v00 = r0 & mask4
vpand % xmm4, % xmm13, % xmm9

# qhasm: 8x v10 = r1 << 8
vpsllw $8, % xmm14, % xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8, % xmm13, % xmm13

# qhasm: v11 = r1 & mask5
vpand % xmm5, % xmm14, % xmm14

# qhasm: r0 = v00 | v10
vpor % xmm15, % xmm9, % xmm9

# qhasm: r1 = v01 | v11
vpor % xmm14, % xmm13, % xmm13

# qhasm: v00 = r2 & mask4
vpand % xmm4, % xmm11, % xmm14

# qhasm: 8x v10 = r3 << 8
vpsllw $8, % xmm10, % xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = r3 & mask5
vpand % xmm5, % xmm10, % xmm10

# qhasm: r2 = v00 | v10
vpor % xmm15, % xmm14, % xmm14

# qhasm: r3 = v01 | v11
vpor % xmm10, % xmm11, % xmm10

# qhasm: v00 = r4 & mask4
vpand % xmm4, % xmm12, % xmm11

# qhasm: 8x v10 = r5 << 8
vpsllw $8, % xmm8, % xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8, % xmm12, % xmm12

# qhasm: v11 = r5 & mask5
vpand % xmm5, % xmm8, % xmm8

# qhasm: r4 = v00 | v10
vpor % xmm15, % xmm11, % xmm11

# qhasm: r5 = v01 | v11
vpor % xmm8, % xmm12, % xmm8

# qhasm: v00 = r6 & mask4
vpand % xmm4, % xmm6, % xmm12

# qhasm: 8x v10 = r7 << 8
vpsllw $8, % xmm7, % xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = r7 & mask5
vpand % xmm5, % xmm7, % xmm7

# qhasm: r6 = v00 | v10
vpor % xmm15, % xmm12, % xmm12

# qhasm: r7 = v01 | v11
vpor % xmm7, % xmm6, % xmm6

# qhasm: buf = r0[0]
pextrq $0x0, % xmm9, % rsi

# qhasm: mem64[ input_0 + 48 ] = buf
movq % rsi, 48( % rdi)

# qhasm: buf = r1[0]
pextrq $0x0, % xmm13, % rsi

# qhasm: mem64[ input_0 + 112 ] = buf
movq % rsi, 112( % rdi)

# qhasm: buf = r2[0]
pextrq $0x0, % xmm14, % rsi

# qhasm: mem64[ input_0 + 176 ] = buf
movq % rsi, 176( % rdi)

# qhasm: buf = r3[0]
pextrq $0x0, % xmm10, % rsi

# qhasm: mem64[ input_0 + 240 ] = buf
movq % rsi, 240( % rdi)

# qhasm: buf = r4[0]
pextrq $0x0, % xmm11, % rsi

# qhasm: mem64[ input_0 + 304 ] = buf
movq % rsi, 304( % rdi)

# qhasm: buf = r5[0]
pextrq $0x0, % xmm8, % rsi

# qhasm: mem64[ input_0 + 368 ] = buf
movq % rsi, 368( % rdi)

# qhasm: buf = r6[0]
pextrq $0x0, % xmm12, % rsi

# qhasm: mem64[ input_0 + 432 ] = buf
movq % rsi, 432( % rdi)

# qhasm: buf = r7[0]
pextrq $0x0, % xmm6, % rsi

# qhasm: mem64[ input_0 + 496 ] = buf
movq % rsi, 496( % rdi)
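# note (editor): last 64-byte-strided pass of this phase (quadwords at byte
# offsets 56, 120, ..., 504). Each mask register's final use comes inside
# this block, so the generator starts recycling xmm0-xmm5 as scratch below,
# which is why the register pattern differs from the three passes above.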
# qhasm: r0 = mem64[ input_0 + 56 ] x2
movddup 56( % rdi), % xmm6

# qhasm: r1 = mem64[ input_0 + 120 ] x2
movddup 120( % rdi), % xmm7

# qhasm: r2 = mem64[ input_0 + 184 ] x2
movddup 184( % rdi), % xmm8

# qhasm: r3 = mem64[ input_0 + 248 ] x2
movddup 248( % rdi), % xmm9

# qhasm: r4 = mem64[ input_0 + 312 ] x2
movddup 312( % rdi), % xmm10

# qhasm: r5 = mem64[ input_0 + 376 ] x2
movddup 376( % rdi), % xmm11

# qhasm: r6 = mem64[ input_0 + 440 ] x2
movddup 440( % rdi), % xmm12

# qhasm: r7 = mem64[ input_0 + 504 ] x2
movddup 504( % rdi), % xmm13

# qhasm: v00 = r0 & mask0
vpand % xmm0, % xmm6, % xmm14

# qhasm: 2x v10 = r4 << 32
vpsllq $32, % xmm10, % xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
vpsrlq $32, % xmm6, % xmm6

# qhasm: v11 = r4 & mask1
vpand % xmm1, % xmm10, % xmm10

# qhasm: r0 = v00 | v10
vpor % xmm15, % xmm14, % xmm14

# qhasm: r4 = v01 | v11
vpor % xmm10, % xmm6, % xmm6

# qhasm: v00 = r1 & mask0
vpand % xmm0, % xmm7, % xmm10

# qhasm: 2x v10 = r5 << 32
vpsllq $32, % xmm11, % xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
vpsrlq $32, % xmm7, % xmm7

# qhasm: v11 = r5 & mask1
vpand % xmm1, % xmm11, % xmm11

# qhasm: r1 = v00 | v10
vpor % xmm15, % xmm10, % xmm10

# qhasm: r5 = v01 | v11
vpor % xmm11, % xmm7, % xmm7

# qhasm: v00 = r2 & mask0
vpand % xmm0, % xmm8, % xmm11

# qhasm: 2x v10 = r6 << 32
vpsllq $32, % xmm12, % xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
vpsrlq $32, % xmm8, % xmm8

# qhasm: v11 = r6 & mask1
vpand % xmm1, % xmm12, % xmm12

# qhasm: r2 = v00 | v10
vpor % xmm15, % xmm11, % xmm11

# qhasm: r6 = v01 | v11
vpor % xmm12, % xmm8, % xmm8

# qhasm: v00 = r3 & mask0
vpand % xmm0, % xmm9, % xmm0

# qhasm: 2x v10 = r7 << 32
vpsllq $32, % xmm13, % xmm12

# qhasm: 2x v01 = r3 unsigned>> 32
vpsrlq $32, % xmm9, % xmm9

# qhasm: v11 = r7 & mask1
vpand % xmm1, % xmm13, % xmm1

# qhasm: r3 = v00 | v10
vpor % xmm12, % xmm0, % xmm0

# qhasm: r7 = v01 | v11
vpor % xmm1, % xmm9, % xmm1

# qhasm: v00 = r0 & mask2
vpand % xmm2, % xmm14, % xmm9

# qhasm: 4x v10 = r2 << 16
vpslld $16, % xmm11, % xmm12

# qhasm: 4x v01 = r0 unsigned>> 16
vpsrld $16, % xmm14, % xmm13

# qhasm: v11 = r2 & mask3
vpand % xmm3, % xmm11, % xmm11

# qhasm: r0 = v00 | v10
vpor % xmm12, % xmm9, % xmm9

# qhasm: r2 = v01 | v11
vpor % xmm11, % xmm13, % xmm11

# qhasm: v00 = r1 & mask2
vpand % xmm2, % xmm10, % xmm12

# qhasm: 4x v10 = r3 << 16
vpslld $16, % xmm0, % xmm13

# qhasm: 4x v01 = r1 unsigned>> 16
vpsrld $16, % xmm10, % xmm10

# qhasm: v11 = r3 & mask3
vpand % xmm3, % xmm0, % xmm0

# qhasm: r1 = v00 | v10
vpor % xmm13, % xmm12, % xmm12

# qhasm: r3 = v01 | v11
vpor % xmm0, % xmm10, % xmm0

# qhasm: v00 = r4 & mask2
vpand % xmm2, % xmm6, % xmm10

# qhasm: 4x v10 = r6 << 16
vpslld $16, % xmm8, % xmm13

# qhasm: 4x v01 = r4 unsigned>> 16
vpsrld $16, % xmm6, % xmm6

# qhasm: v11 = r6 & mask3
vpand % xmm3, % xmm8, % xmm8

# qhasm: r4 = v00 | v10
vpor % xmm13, % xmm10, % xmm10

# qhasm: r6 = v01 | v11
vpor % xmm8, % xmm6, % xmm6

# qhasm: v00 = r5 & mask2
vpand % xmm2, % xmm7, % xmm2

# qhasm: 4x v10 = r7 << 16
vpslld $16, % xmm1, % xmm8

# qhasm: 4x v01 = r5 unsigned>> 16
vpsrld $16, % xmm7, % xmm7

# qhasm: v11 = r7 & mask3
vpand % xmm3, % xmm1, % xmm1

# qhasm: r5 = v00 | v10
vpor % xmm8, % xmm2, % xmm2

# qhasm: r7 = v01 | v11
vpor % xmm1, % xmm7, % xmm1

# qhasm: v00 = r0 & mask4
vpand % xmm4, % xmm9, % xmm3

# qhasm: 8x v10 = r1 << 8
vpsllw $8, % xmm12, % xmm7

# qhasm: 8x v01 = r0 unsigned>> 8
vpsrlw $8, % xmm9, % xmm8

# qhasm: v11 = r1 & mask5
vpand % xmm5, % xmm12, % xmm9

# qhasm: r0 = v00 | v10
vpor % xmm7, % xmm3, % xmm3

# qhasm: r1 = v01 | v11
vpor % xmm9, % xmm8, % xmm7

# qhasm: v00 = r2 & mask4
vpand % xmm4, % xmm11, % xmm8

# qhasm: 8x v10 = r3 << 8
vpsllw $8, % xmm0, % xmm9

# qhasm: 8x v01 = r2 unsigned>> 8
vpsrlw $8, % xmm11, % xmm11

# qhasm: v11 = r3 & mask5
vpand % xmm5, % xmm0, % xmm0

# qhasm: r2 = v00 | v10
vpor % xmm9, % xmm8, % xmm8

# qhasm: r3 = v01 | v11
vpor % xmm0, % xmm11, % xmm0

# qhasm: v00 = r4 & mask4
vpand % xmm4, % xmm10, % xmm9

# qhasm: 8x v10 = r5 << 8
vpsllw $8, % xmm2, % xmm11

# qhasm: 8x v01 = r4 unsigned>> 8
vpsrlw $8, % xmm10, % xmm10

# qhasm: v11 = r5 & mask5
vpand % xmm5, % xmm2, % xmm2

# qhasm: r4 = v00 | v10
vpor % xmm11, % xmm9, % xmm9

# qhasm: r5 = v01 | v11
vpor % xmm2, % xmm10, % xmm2

# qhasm: v00 = r6 & mask4
vpand % xmm4, % xmm6, % xmm4

# qhasm: 8x v10 = r7 << 8
vpsllw $8, % xmm1, % xmm10

# qhasm: 8x v01 = r6 unsigned>> 8
vpsrlw $8, % xmm6, % xmm6

# qhasm: v11 = r7 & mask5
vpand % xmm5, % xmm1, % xmm1

# qhasm: r6 = v00 | v10
vpor % xmm10, % xmm4, % xmm4

# qhasm: r7 = v01 | v11
vpor % xmm1, % xmm6, % xmm1

# qhasm: buf = r0[0]
pextrq $0x0, % xmm3, % rsi

# qhasm: mem64[ input_0 + 56 ] = buf
movq % rsi, 56( % rdi)

# qhasm: buf = r1[0]
pextrq $0x0, % xmm7, % rsi

# qhasm: mem64[ input_0 + 120 ] = buf
movq % rsi, 120( % rdi)

# qhasm: buf = r2[0]
pextrq $0x0, % xmm8, % rsi

# qhasm: mem64[ input_0 + 184 ] = buf
movq % rsi, 184( % rdi)

# qhasm: buf = r3[0]
pextrq $0x0, % xmm0, % rsi

# qhasm: mem64[ input_0 + 248 ] = buf
movq % rsi, 248( % rdi)

# qhasm: buf = r4[0]
pextrq $0x0, % xmm9, % rsi

# qhasm: mem64[ input_0 + 312 ] = buf
movq % rsi, 312( % rdi)

# qhasm: buf = r5[0]
pextrq $0x0, % xmm2, % rsi

# qhasm: mem64[ input_0 + 376 ] = buf
movq % rsi, 376( % rdi)

# qhasm: buf = r6[0]
pextrq $0x0, % xmm4, % rsi

# qhasm: mem64[ input_0 + 440 ] = buf
movq % rsi, 440( % rdi)

# qhasm: buf = r7[0]
pextrq $0x0, % xmm1, % rsi

# qhasm: mem64[ input_0 + 504 ] = buf
movq % rsi, 504( % rdi)
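# note (editor): the coarse (32/16/8-bit) interleaving passes are done; the
# mask registers are reloaded with the fine-grained MASK*_* constants
# (MASK2_* for 4-bit, MASK1_* for 2-bit, MASK0_* apparently for a later 1-bit
# step) and the walk restarts at offset 0. In this phase both halves are
# masked before shifting, roughly (sketch, not generator output):
#   r0' = (r0 & mask_lo) | ((r4 & mask_lo) << 4)
#   r4' = ((r0 & mask_hi) >> 4) | (r4 & mask_hi)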
xmm4 # qhasm: mask5 aligned= mem128[ MASK0_1 ] # asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5 movdqa MASK0_1( % rip), % xmm5 # qhasm: r0 = mem64[ input_0 + 0 ] x2 # asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7 # asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6 movddup 0( % rdi), % xmm6 # qhasm: r1 = mem64[ input_0 + 8 ] x2 # asm 1: movddup 8(<input_0=int64#1),>r1=reg128#8 # asm 2: movddup 8(<input_0=%rdi),>r1=%xmm7 movddup 8( % rdi), % xmm7 # qhasm: r2 = mem64[ input_0 + 16 ] x2 # asm 1: movddup 16(<input_0=int64#1),>r2=reg128#9 # asm 2: movddup 16(<input_0=%rdi),>r2=%xmm8 movddup 16( % rdi), % xmm8 # qhasm: r3 = mem64[ input_0 + 24 ] x2 # asm 1: movddup 24(<input_0=int64#1),>r3=reg128#10 # asm 2: movddup 24(<input_0=%rdi),>r3=%xmm9 movddup 24( % rdi), % xmm9 # qhasm: r4 = mem64[ input_0 + 32 ] x2 # asm 1: movddup 32(<input_0=int64#1),>r4=reg128#11 # asm 2: movddup 32(<input_0=%rdi),>r4=%xmm10 movddup 32( % rdi), % xmm10 # qhasm: r5 = mem64[ input_0 + 40 ] x2 # asm 1: movddup 40(<input_0=int64#1),>r5=reg128#12 # asm 2: movddup 40(<input_0=%rdi),>r5=%xmm11 movddup 40( % rdi), % xmm11 # qhasm: r6 = mem64[ input_0 + 48 ] x2 # asm 1: movddup 48(<input_0=int64#1),>r6=reg128#13 # asm 2: movddup 48(<input_0=%rdi),>r6=%xmm12 movddup 48( % rdi), % xmm12 # qhasm: r7 = mem64[ input_0 + 56 ] x2 # asm 1: movddup 56(<input_0=int64#1),>r7=reg128#14 # asm 2: movddup 56(<input_0=%rdi),>r7=%xmm13 movddup 56( % rdi), % xmm13 # qhasm: v00 = r0 & mask0 # asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = r4 & mask0 # asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r0 & mask1 # asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = r4 & mask1 # asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | 
movddup 0(%rdi), %xmm6            # r0 = mem64[ input_0 + 0 ] x2
movddup 8(%rdi), %xmm7            # r1 = mem64[ input_0 + 8 ] x2
movddup 16(%rdi), %xmm8           # r2 = mem64[ input_0 + 16 ] x2
movddup 24(%rdi), %xmm9           # r3 = mem64[ input_0 + 24 ] x2
movddup 32(%rdi), %xmm10          # r4 = mem64[ input_0 + 32 ] x2
movddup 40(%rdi), %xmm11          # r5 = mem64[ input_0 + 40 ] x2
movddup 48(%rdi), %xmm12          # r6 = mem64[ input_0 + 48 ] x2
movddup 56(%rdi), %xmm13          # r7 = mem64[ input_0 + 56 ] x2

vpand %xmm0, %xmm6, %xmm14        # v00 = r0 & mask0
vpand %xmm0, %xmm10, %xmm15       # v10 = r4 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm6, %xmm6         # v01 = r0 & mask1
vpand %xmm1, %xmm10, %xmm10       # v11 = r4 & mask1
psrlq $4, %xmm6                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm14, %xmm14       # r0 = v00 | v10
vpor %xmm10, %xmm6, %xmm6         # r4 = v01 | v11
vpand %xmm0, %xmm7, %xmm10        # v00 = r1 & mask0
vpand %xmm0, %xmm11, %xmm15       # v10 = r5 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm7, %xmm7         # v01 = r1 & mask1
vpand %xmm1, %xmm11, %xmm11       # v11 = r5 & mask1
psrlq $4, %xmm7                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm10, %xmm10       # r1 = v00 | v10
vpor %xmm11, %xmm7, %xmm7         # r5 = v01 | v11
vpand %xmm0, %xmm8, %xmm11        # v00 = r2 & mask0
vpand %xmm0, %xmm12, %xmm15       # v10 = r6 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm8, %xmm8         # v01 = r2 & mask1
vpand %xmm1, %xmm12, %xmm12       # v11 = r6 & mask1
psrlq $4, %xmm8                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm11, %xmm11       # r2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8         # r6 = v01 | v11
vpand %xmm0, %xmm9, %xmm12        # v00 = r3 & mask0
vpand %xmm0, %xmm13, %xmm15       # v10 = r7 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm9, %xmm9         # v01 = r3 & mask1
vpand %xmm1, %xmm13, %xmm13       # v11 = r7 & mask1
psrlq $4, %xmm9                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm12, %xmm12       # r3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9         # r7 = v01 | v11

vpand %xmm2, %xmm14, %xmm13       # v00 = r0 & mask2
vpand %xmm2, %xmm11, %xmm15       # v10 = r2 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm14, %xmm14       # v01 = r0 & mask3
vpand %xmm3, %xmm11, %xmm11       # v11 = r2 & mask3
psrlq $2, %xmm14                  # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm13, %xmm13       # r0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11       # r2 = v01 | v11
vpand %xmm2, %xmm10, %xmm14       # v00 = r1 & mask2
vpand %xmm2, %xmm12, %xmm15       # v10 = r3 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm10, %xmm10       # v01 = r1 & mask3
vpand %xmm3, %xmm12, %xmm12       # v11 = r3 & mask3
psrlq $2, %xmm10                  # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm14, %xmm14       # r1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10       # r3 = v01 | v11
vpand %xmm2, %xmm6, %xmm12        # v00 = r4 & mask2
vpand %xmm2, %xmm8, %xmm15        # v10 = r6 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm6, %xmm6         # v01 = r4 & mask3
vpand %xmm3, %xmm8, %xmm8         # v11 = r6 & mask3
psrlq $2, %xmm6                   # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm12, %xmm12       # r4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6          # r6 = v01 | v11
vpand %xmm2, %xmm7, %xmm8         # v00 = r5 & mask2
vpand %xmm2, %xmm9, %xmm15        # v10 = r7 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm7, %xmm7         # v01 = r5 & mask3
vpand %xmm3, %xmm9, %xmm9         # v11 = r7 & mask3
psrlq $2, %xmm7                   # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm8, %xmm8         # r5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7          # r7 = v01 | v11

vpand %xmm4, %xmm13, %xmm9        # v00 = r0 & mask4
vpand %xmm4, %xmm14, %xmm15       # v10 = r1 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm13, %xmm13       # v01 = r0 & mask5
vpand %xmm5, %xmm14, %xmm14       # v11 = r1 & mask5
psrlq $1, %xmm13                  # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm9, %xmm9         # r0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13       # r1 = v01 | v11
vpand %xmm4, %xmm11, %xmm14       # v00 = r2 & mask4
vpand %xmm4, %xmm10, %xmm15       # v10 = r3 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm11, %xmm11       # v01 = r2 & mask5
vpand %xmm5, %xmm10, %xmm10       # v11 = r3 & mask5
psrlq $1, %xmm11                  # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm14, %xmm14       # r2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10       # r3 = v01 | v11
vpand %xmm4, %xmm12, %xmm11       # v00 = r4 & mask4
vpand %xmm4, %xmm8, %xmm15        # v10 = r5 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm12, %xmm12       # v01 = r4 & mask5
vpand %xmm5, %xmm8, %xmm8         # v11 = r5 & mask5
psrlq $1, %xmm12                  # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm11, %xmm11       # r4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8         # r5 = v01 | v11
vpand %xmm4, %xmm6, %xmm12        # v00 = r6 & mask4
vpand %xmm4, %xmm7, %xmm15        # v10 = r7 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm6, %xmm6         # v01 = r6 & mask5
vpand %xmm5, %xmm7, %xmm7         # v11 = r7 & mask5
psrlq $1, %xmm6                   # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm12, %xmm12       # r6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6          # r7 = v01 | v11

vpunpcklqdq %xmm13, %xmm9, %xmm7  # t0 = r0[0]r1[0]
movdqu %xmm7, 0(%rdi)             # mem128[ input_0 + 0 ] = t0
vpunpcklqdq %xmm10, %xmm14, %xmm7 # t0 = r2[0]r3[0]
movdqu %xmm7, 16(%rdi)            # mem128[ input_0 + 16 ] = t0
vpunpcklqdq %xmm8, %xmm11, %xmm7  # t0 = r4[0]r5[0]
movdqu %xmm7, 32(%rdi)            # mem128[ input_0 + 32 ] = t0
vpunpcklqdq %xmm6, %xmm12, %xmm6  # t0 = r6[0]r7[0]
movdqu %xmm6, 48(%rdi)            # mem128[ input_0 + 48 ] = t0
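# Second unrolled copy of the same three rounds, for the eight rows at
# bytes 64..127 of the buffer that input_0 points to.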
movddup 64(%rdi), %xmm6           # r0 = mem64[ input_0 + 64 ] x2
movddup 72(%rdi), %xmm7           # r1 = mem64[ input_0 + 72 ] x2
movddup 80(%rdi), %xmm8           # r2 = mem64[ input_0 + 80 ] x2
movddup 88(%rdi), %xmm9           # r3 = mem64[ input_0 + 88 ] x2
movddup 96(%rdi), %xmm10          # r4 = mem64[ input_0 + 96 ] x2
movddup 104(%rdi), %xmm11         # r5 = mem64[ input_0 + 104 ] x2
movddup 112(%rdi), %xmm12         # r6 = mem64[ input_0 + 112 ] x2
movddup 120(%rdi), %xmm13         # r7 = mem64[ input_0 + 120 ] x2

vpand %xmm0, %xmm6, %xmm14        # v00 = r0 & mask0
vpand %xmm0, %xmm10, %xmm15       # v10 = r4 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm6, %xmm6         # v01 = r0 & mask1
vpand %xmm1, %xmm10, %xmm10       # v11 = r4 & mask1
psrlq $4, %xmm6                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm14, %xmm14       # r0 = v00 | v10
vpor %xmm10, %xmm6, %xmm6         # r4 = v01 | v11
vpand %xmm0, %xmm7, %xmm10        # v00 = r1 & mask0
vpand %xmm0, %xmm11, %xmm15       # v10 = r5 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm7, %xmm7         # v01 = r1 & mask1
vpand %xmm1, %xmm11, %xmm11       # v11 = r5 & mask1
psrlq $4, %xmm7                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm10, %xmm10       # r1 = v00 | v10
vpor %xmm11, %xmm7, %xmm7         # r5 = v01 | v11
vpand %xmm0, %xmm8, %xmm11        # v00 = r2 & mask0
vpand %xmm0, %xmm12, %xmm15       # v10 = r6 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm8, %xmm8         # v01 = r2 & mask1
vpand %xmm1, %xmm12, %xmm12       # v11 = r6 & mask1
psrlq $4, %xmm8                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm11, %xmm11       # r2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8         # r6 = v01 | v11
vpand %xmm0, %xmm9, %xmm12        # v00 = r3 & mask0
vpand %xmm0, %xmm13, %xmm15       # v10 = r7 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm9, %xmm9         # v01 = r3 & mask1
vpand %xmm1, %xmm13, %xmm13       # v11 = r7 & mask1
psrlq $4, %xmm9                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm12, %xmm12       # r3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9         # r7 = v01 | v11

vpand %xmm2, %xmm14, %xmm13       # v00 = r0 & mask2
vpand %xmm2, %xmm11, %xmm15       # v10 = r2 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm14, %xmm14       # v01 = r0 & mask3
vpand %xmm3, %xmm11, %xmm11       # v11 = r2 & mask3
psrlq $2, %xmm14                  # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm13, %xmm13       # r0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11       # r2 = v01 | v11
vpand %xmm2, %xmm10, %xmm14       # v00 = r1 & mask2
vpand %xmm2, %xmm12, %xmm15       # v10 = r3 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm10, %xmm10       # v01 = r1 & mask3
vpand %xmm3, %xmm12, %xmm12       # v11 = r3 & mask3
psrlq $2, %xmm10                  # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm14, %xmm14       # r1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10       # r3 = v01 | v11
vpand %xmm2, %xmm6, %xmm12        # v00 = r4 & mask2
vpand %xmm2, %xmm8, %xmm15        # v10 = r6 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm6, %xmm6         # v01 = r4 & mask3
vpand %xmm3, %xmm8, %xmm8         # v11 = r6 & mask3
psrlq $2, %xmm6                   # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm12, %xmm12       # r4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6          # r6 = v01 | v11
vpand %xmm2, %xmm7, %xmm8         # v00 = r5 & mask2
vpand %xmm2, %xmm9, %xmm15        # v10 = r7 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm7, %xmm7         # v01 = r5 & mask3
vpand %xmm3, %xmm9, %xmm9         # v11 = r7 & mask3
psrlq $2, %xmm7                   # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm8, %xmm8         # r5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7          # r7 = v01 | v11

vpand %xmm4, %xmm13, %xmm9        # v00 = r0 & mask4
vpand %xmm4, %xmm14, %xmm15       # v10 = r1 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm13, %xmm13       # v01 = r0 & mask5
vpand %xmm5, %xmm14, %xmm14       # v11 = r1 & mask5
psrlq $1, %xmm13                  # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm9, %xmm9         # r0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13       # r1 = v01 | v11
vpand %xmm4, %xmm11, %xmm14       # v00 = r2 & mask4
vpand %xmm4, %xmm10, %xmm15       # v10 = r3 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm11, %xmm11       # v01 = r2 & mask5
vpand %xmm5, %xmm10, %xmm10       # v11 = r3 & mask5
psrlq $1, %xmm11                  # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm14, %xmm14       # r2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10       # r3 = v01 | v11
vpand %xmm4, %xmm12, %xmm11       # v00 = r4 & mask4
vpand %xmm4, %xmm8, %xmm15        # v10 = r5 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm12, %xmm12       # v01 = r4 & mask5
vpand %xmm5, %xmm8, %xmm8         # v11 = r5 & mask5
psrlq $1, %xmm12                  # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm11, %xmm11       # r4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8         # r5 = v01 | v11
vpand %xmm4, %xmm6, %xmm12        # v00 = r6 & mask4
vpand %xmm4, %xmm7, %xmm15        # v10 = r7 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm6, %xmm6         # v01 = r6 & mask5
vpand %xmm5, %xmm7, %xmm7         # v11 = r7 & mask5
psrlq $1, %xmm6                   # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm12, %xmm12       # r6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6          # r7 = v01 | v11

vpunpcklqdq %xmm13, %xmm9, %xmm7  # t0 = r0[0]r1[0]
movdqu %xmm7, 64(%rdi)            # mem128[ input_0 + 64 ] = t0
vpunpcklqdq %xmm10, %xmm14, %xmm7 # t0 = r2[0]r3[0]
movdqu %xmm7, 80(%rdi)            # mem128[ input_0 + 80 ] = t0
vpunpcklqdq %xmm8, %xmm11, %xmm7  # t0 = r4[0]r5[0]
movdqu %xmm7, 96(%rdi)            # mem128[ input_0 + 96 ] = t0
vpunpcklqdq %xmm6, %xmm12, %xmm6  # t0 = r6[0]r7[0]
movdqu %xmm6, 112(%rdi)           # mem128[ input_0 + 112 ] = t0
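# Third unrolled copy: the eight rows at bytes 128..191.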
movddup 128(%rdi), %xmm6          # r0 = mem64[ input_0 + 128 ] x2
movddup 136(%rdi), %xmm7          # r1 = mem64[ input_0 + 136 ] x2
movddup 144(%rdi), %xmm8          # r2 = mem64[ input_0 + 144 ] x2
movddup 152(%rdi), %xmm9          # r3 = mem64[ input_0 + 152 ] x2
movddup 160(%rdi), %xmm10         # r4 = mem64[ input_0 + 160 ] x2
movddup 168(%rdi), %xmm11         # r5 = mem64[ input_0 + 168 ] x2
movddup 176(%rdi), %xmm12         # r6 = mem64[ input_0 + 176 ] x2
movddup 184(%rdi), %xmm13         # r7 = mem64[ input_0 + 184 ] x2

vpand %xmm0, %xmm6, %xmm14        # v00 = r0 & mask0
vpand %xmm0, %xmm10, %xmm15       # v10 = r4 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm6, %xmm6         # v01 = r0 & mask1
vpand %xmm1, %xmm10, %xmm10       # v11 = r4 & mask1
psrlq $4, %xmm6                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm14, %xmm14       # r0 = v00 | v10
vpor %xmm10, %xmm6, %xmm6         # r4 = v01 | v11
vpand %xmm0, %xmm7, %xmm10        # v00 = r1 & mask0
vpand %xmm0, %xmm11, %xmm15       # v10 = r5 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm7, %xmm7         # v01 = r1 & mask1
vpand %xmm1, %xmm11, %xmm11       # v11 = r5 & mask1
psrlq $4, %xmm7                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm10, %xmm10       # r1 = v00 | v10
vpor %xmm11, %xmm7, %xmm7         # r5 = v01 | v11
vpand %xmm0, %xmm8, %xmm11        # v00 = r2 & mask0
vpand %xmm0, %xmm12, %xmm15       # v10 = r6 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm8, %xmm8         # v01 = r2 & mask1
vpand %xmm1, %xmm12, %xmm12       # v11 = r6 & mask1
psrlq $4, %xmm8                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm11, %xmm11       # r2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8         # r6 = v01 | v11
vpand %xmm0, %xmm9, %xmm12        # v00 = r3 & mask0
vpand %xmm0, %xmm13, %xmm15       # v10 = r7 & mask0
psllq $4, %xmm15                  # 2x v10 <<= 4
vpand %xmm1, %xmm9, %xmm9         # v01 = r3 & mask1
vpand %xmm1, %xmm13, %xmm13       # v11 = r7 & mask1
psrlq $4, %xmm9                   # 2x v01 unsigned>>= 4
vpor %xmm15, %xmm12, %xmm12       # r3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9         # r7 = v01 | v11

vpand %xmm2, %xmm14, %xmm13       # v00 = r0 & mask2
vpand %xmm2, %xmm11, %xmm15       # v10 = r2 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm14, %xmm14       # v01 = r0 & mask3
vpand %xmm3, %xmm11, %xmm11       # v11 = r2 & mask3
psrlq $2, %xmm14                  # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm13, %xmm13       # r0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11       # r2 = v01 | v11
vpand %xmm2, %xmm10, %xmm14       # v00 = r1 & mask2
vpand %xmm2, %xmm12, %xmm15       # v10 = r3 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm10, %xmm10       # v01 = r1 & mask3
vpand %xmm3, %xmm12, %xmm12       # v11 = r3 & mask3
psrlq $2, %xmm10                  # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm14, %xmm14       # r1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10       # r3 = v01 | v11
vpand %xmm2, %xmm6, %xmm12        # v00 = r4 & mask2
vpand %xmm2, %xmm8, %xmm15        # v10 = r6 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm6, %xmm6         # v01 = r4 & mask3
vpand %xmm3, %xmm8, %xmm8         # v11 = r6 & mask3
psrlq $2, %xmm6                   # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm12, %xmm12       # r4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6          # r6 = v01 | v11
vpand %xmm2, %xmm7, %xmm8         # v00 = r5 & mask2
vpand %xmm2, %xmm9, %xmm15        # v10 = r7 & mask2
psllq $2, %xmm15                  # 2x v10 <<= 2
vpand %xmm3, %xmm7, %xmm7         # v01 = r5 & mask3
vpand %xmm3, %xmm9, %xmm9         # v11 = r7 & mask3
psrlq $2, %xmm7                   # 2x v01 unsigned>>= 2
vpor %xmm15, %xmm8, %xmm8         # r5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7          # r7 = v01 | v11

vpand %xmm4, %xmm13, %xmm9        # v00 = r0 & mask4
vpand %xmm4, %xmm14, %xmm15       # v10 = r1 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm13, %xmm13       # v01 = r0 & mask5
vpand %xmm5, %xmm14, %xmm14       # v11 = r1 & mask5
psrlq $1, %xmm13                  # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm9, %xmm9         # r0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13       # r1 = v01 | v11
vpand %xmm4, %xmm11, %xmm14       # v00 = r2 & mask4
vpand %xmm4, %xmm10, %xmm15       # v10 = r3 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm11, %xmm11       # v01 = r2 & mask5
vpand %xmm5, %xmm10, %xmm10       # v11 = r3 & mask5
psrlq $1, %xmm11                  # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm14, %xmm14       # r2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10       # r3 = v01 | v11
vpand %xmm4, %xmm12, %xmm11       # v00 = r4 & mask4
vpand %xmm4, %xmm8, %xmm15        # v10 = r5 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm12, %xmm12       # v01 = r4 & mask5
vpand %xmm5, %xmm8, %xmm8         # v11 = r5 & mask5
psrlq $1, %xmm12                  # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm11, %xmm11       # r4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8         # r5 = v01 | v11
vpand %xmm4, %xmm6, %xmm12        # v00 = r6 & mask4
vpand %xmm4, %xmm7, %xmm15        # v10 = r7 & mask4
psllq $1, %xmm15                  # 2x v10 <<= 1
vpand %xmm5, %xmm6, %xmm6         # v01 = r6 & mask5
vpand %xmm5, %xmm7, %xmm7         # v11 = r7 & mask5
psrlq $1, %xmm6                   # 2x v01 unsigned>>= 1
vpor %xmm15, %xmm12, %xmm12       # r6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6          # r7 = v01 | v11

vpunpcklqdq %xmm13, %xmm9, %xmm7  # t0 = r0[0]r1[0]
movdqu %xmm7, 128(%rdi)           # mem128[ input_0 + 128 ] = t0
vpunpcklqdq %xmm10, %xmm14, %xmm7 # t0 = r2[0]r3[0]
movdqu %xmm7, 144(%rdi)           # mem128[ input_0 + 144 ] = t0
vpunpcklqdq %xmm8, %xmm11, %xmm7  # t0 = r4[0]r5[0]
movdqu %xmm7, 160(%rdi)           # mem128[ input_0 + 160 ] = t0
vpunpcklqdq %xmm6, %xmm12, %xmm6  # t0 = r6[0]r7[0]
movdqu %xmm6, 176(%rdi)           # mem128[ input_0 + 176 ] = t0
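# Fourth unrolled copy: the eight rows at bytes 192..255.  The same
# pattern continues below for the rows starting at byte 256.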
xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: r0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: r4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = r1 & mask0 # asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = r5 & mask0 # asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r1 & mask1 # asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = r5 & mask1 # asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: r1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = r2 & mask0 # asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = r6 & mask0 # asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r2 & mask1 # asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = r6 & mask1 # asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: r6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = r3 & mask0 # asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = r7 & mask0 # asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = r3 & mask1 # asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = r7 & mask1 # asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14 # asm 2: vpand 
vpand %xmm1, %xmm13, %xmm13       # qhasm: v11 = r7 & mask1
psrlq $4, %xmm9                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm12, %xmm12       # qhasm: r3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9         # qhasm: r7 = v01 | v11

vpand %xmm2, %xmm14, %xmm13       # qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm11, %xmm15       # qhasm: v10 = r2 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm14, %xmm14       # qhasm: v01 = r0 & mask3
vpand %xmm3, %xmm11, %xmm11       # qhasm: v11 = r2 & mask3
psrlq $2, %xmm14                  # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm13, %xmm13       # qhasm: r0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11       # qhasm: r2 = v01 | v11

vpand %xmm2, %xmm10, %xmm14       # qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm12, %xmm15       # qhasm: v10 = r3 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm10, %xmm10       # qhasm: v01 = r1 & mask3
vpand %xmm3, %xmm12, %xmm12       # qhasm: v11 = r3 & mask3
psrlq $2, %xmm10                  # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm14, %xmm14       # qhasm: r1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10       # qhasm: r3 = v01 | v11

vpand %xmm2, %xmm6, %xmm12        # qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm8, %xmm15        # qhasm: v10 = r6 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm6, %xmm6         # qhasm: v01 = r4 & mask3
vpand %xmm3, %xmm8, %xmm8         # qhasm: v11 = r6 & mask3
psrlq $2, %xmm6                   # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm12, %xmm12       # qhasm: r4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6          # qhasm: r6 = v01 | v11

vpand %xmm2, %xmm7, %xmm8         # qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm9, %xmm15        # qhasm: v10 = r7 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm7, %xmm7         # qhasm: v01 = r5 & mask3
vpand %xmm3, %xmm9, %xmm9         # qhasm: v11 = r7 & mask3
psrlq $2, %xmm7                   # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm8, %xmm8         # qhasm: r5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7          # qhasm: r7 = v01 | v11

vpand %xmm4, %xmm13, %xmm9        # qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm14, %xmm15       # qhasm: v10 = r1 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm13, %xmm13       # qhasm: v01 = r0 & mask5
vpand %xmm5, %xmm14, %xmm14       # qhasm: v11 = r1 & mask5
psrlq $1, %xmm13                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm9, %xmm9         # qhasm: r0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13       # qhasm: r1 = v01 | v11

vpand %xmm4, %xmm11, %xmm14       # qhasm: v00 = r2 & mask4
vpand %xmm4, %xmm10, %xmm15       # qhasm: v10 = r3 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm11, %xmm11       # qhasm: v01 = r2 & mask5
vpand %xmm5, %xmm10, %xmm10       # qhasm: v11 = r3 & mask5
psrlq $1, %xmm11                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm14, %xmm14       # qhasm: r2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10       # qhasm: r3 = v01 | v11

vpand %xmm4, %xmm12, %xmm11       # qhasm: v00 = r4 & mask4
vpand %xmm4, %xmm8, %xmm15        # qhasm: v10 = r5 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm12, %xmm12       # qhasm: v01 = r4 & mask5
vpand %xmm5, %xmm8, %xmm8         # qhasm: v11 = r5 & mask5
psrlq $1, %xmm12                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm11, %xmm11       # qhasm: r4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8         # qhasm: r5 = v01 | v11

vpand %xmm4, %xmm6, %xmm12        # qhasm: v00 = r6 & mask4
vpand %xmm4, %xmm7, %xmm15        # qhasm: v10 = r7 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm6, %xmm6         # qhasm: v01 = r6 & mask5
vpand %xmm5, %xmm7, %xmm7         # qhasm: v11 = r7 & mask5
psrlq $1, %xmm6                   # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm12, %xmm12       # qhasm: r6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6          # qhasm: r7 = v01 | v11

vpunpcklqdq %xmm13, %xmm9, %xmm7  # qhasm: t0 = r0[0]r1[0]
movdqu %xmm7, 192(%rdi)           # qhasm: mem128[ input_0 + 192 ] = t0
vpunpcklqdq %xmm10, %xmm14, %xmm7 # qhasm: t0 = r2[0]r3[0]
movdqu %xmm7, 208(%rdi)           # qhasm: mem128[ input_0 + 208 ] = t0
vpunpcklqdq %xmm8, %xmm11, %xmm7  # qhasm: t0 = r4[0]r5[0]
movdqu %xmm7, 224(%rdi)           # qhasm: mem128[ input_0 + 224 ] = t0
vpunpcklqdq %xmm6, %xmm12, %xmm6  # qhasm: t0 = r6[0]r7[0]
movdqu %xmm6, 240(%rdi)           # qhasm: mem128[ input_0 + 240 ] = t0
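# The eight doubled quadwords handled in each 64-byte block below go through
# three rounds of the same masked-swap pattern, one round per complementary
# mask pair (mask0/mask1, mask2/mask3, mask4/mask5) with shift counts 4, 2
# and 1: the classic bit-interleaving / bit-matrix transpose network. A
# rough C sketch of one round -- the variable names and the complementary-
# mask relation (m1 == ~m0) are illustrative assumptions, not taken from
# this file:
#
#     uint64_t lo = (a & m0) | ((b & m0) << s);   /* v00 | v10 -> new a */
#     uint64_t hi = ((a & m1) >> s) | (b & m1);   /* v01 | v11 -> new b */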
movddup 256(%rdi), %xmm6          # qhasm: r0 = mem64[ input_0 + 256 ] x2
movddup 264(%rdi), %xmm7          # qhasm: r1 = mem64[ input_0 + 264 ] x2
movddup 272(%rdi), %xmm8          # qhasm: r2 = mem64[ input_0 + 272 ] x2
movddup 280(%rdi), %xmm9          # qhasm: r3 = mem64[ input_0 + 280 ] x2
movddup 288(%rdi), %xmm10         # qhasm: r4 = mem64[ input_0 + 288 ] x2
movddup 296(%rdi), %xmm11         # qhasm: r5 = mem64[ input_0 + 296 ] x2
movddup 304(%rdi), %xmm12         # qhasm: r6 = mem64[ input_0 + 304 ] x2
movddup 312(%rdi), %xmm13         # qhasm: r7 = mem64[ input_0 + 312 ] x2

vpand %xmm0, %xmm6, %xmm14        # qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm10, %xmm15       # qhasm: v10 = r4 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm6, %xmm6         # qhasm: v01 = r0 & mask1
vpand %xmm1, %xmm10, %xmm10       # qhasm: v11 = r4 & mask1
psrlq $4, %xmm6                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm14, %xmm14       # qhasm: r0 = v00 | v10
vpor %xmm10, %xmm6, %xmm6         # qhasm: r4 = v01 | v11

vpand %xmm0, %xmm7, %xmm10        # qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm11, %xmm15       # qhasm: v10 = r5 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm7, %xmm7         # qhasm: v01 = r1 & mask1
vpand %xmm1, %xmm11, %xmm11       # qhasm: v11 = r5 & mask1
psrlq $4, %xmm7                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm10, %xmm10       # qhasm: r1 = v00 | v10
vpor %xmm11, %xmm7, %xmm7         # qhasm: r5 = v01 | v11

vpand %xmm0, %xmm8, %xmm11        # qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm12, %xmm15       # qhasm: v10 = r6 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm8, %xmm8         # qhasm: v01 = r2 & mask1
vpand %xmm1, %xmm12, %xmm12       # qhasm: v11 = r6 & mask1
psrlq $4, %xmm8                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm11, %xmm11       # qhasm: r2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8         # qhasm: r6 = v01 | v11

vpand %xmm0, %xmm9, %xmm12        # qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm13, %xmm15       # qhasm: v10 = r7 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm9, %xmm9         # qhasm: v01 = r3 & mask1
vpand %xmm1, %xmm13, %xmm13       # qhasm: v11 = r7 & mask1
psrlq $4, %xmm9                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm12, %xmm12       # qhasm: r3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9         # qhasm: r7 = v01 | v11

vpand %xmm2, %xmm14, %xmm13       # qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm11, %xmm15       # qhasm: v10 = r2 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm14, %xmm14       # qhasm: v01 = r0 & mask3
vpand %xmm3, %xmm11, %xmm11       # qhasm: v11 = r2 & mask3
psrlq $2, %xmm14                  # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm13, %xmm13       # qhasm: r0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11       # qhasm: r2 = v01 | v11

vpand %xmm2, %xmm10, %xmm14       # qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm12, %xmm15       # qhasm: v10 = r3 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm10, %xmm10       # qhasm: v01 = r1 & mask3
vpand %xmm3, %xmm12, %xmm12       # qhasm: v11 = r3 & mask3
psrlq $2, %xmm10                  # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm14, %xmm14       # qhasm: r1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10       # qhasm: r3 = v01 | v11

vpand %xmm2, %xmm6, %xmm12        # qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm8, %xmm15        # qhasm: v10 = r6 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm6, %xmm6         # qhasm: v01 = r4 & mask3
vpand %xmm3, %xmm8, %xmm8         # qhasm: v11 = r6 & mask3
psrlq $2, %xmm6                   # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm12, %xmm12       # qhasm: r4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6          # qhasm: r6 = v01 | v11

vpand %xmm2, %xmm7, %xmm8         # qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm9, %xmm15        # qhasm: v10 = r7 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm7, %xmm7         # qhasm: v01 = r5 & mask3
vpand %xmm3, %xmm9, %xmm9         # qhasm: v11 = r7 & mask3
psrlq $2, %xmm7                   # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm8, %xmm8         # qhasm: r5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7          # qhasm: r7 = v01 | v11

vpand %xmm4, %xmm13, %xmm9        # qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm14, %xmm15       # qhasm: v10 = r1 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm13, %xmm13       # qhasm: v01 = r0 & mask5
vpand %xmm5, %xmm14, %xmm14       # qhasm: v11 = r1 & mask5
psrlq $1, %xmm13                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm9, %xmm9         # qhasm: r0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13       # qhasm: r1 = v01 | v11

vpand %xmm4, %xmm11, %xmm14       # qhasm: v00 = r2 & mask4
vpand %xmm4, %xmm10, %xmm15       # qhasm: v10 = r3 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm11, %xmm11       # qhasm: v01 = r2 & mask5
vpand %xmm5, %xmm10, %xmm10       # qhasm: v11 = r3 & mask5
psrlq $1, %xmm11                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm14, %xmm14       # qhasm: r2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10       # qhasm: r3 = v01 | v11

vpand %xmm4, %xmm12, %xmm11       # qhasm: v00 = r4 & mask4
vpand %xmm4, %xmm8, %xmm15        # qhasm: v10 = r5 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm12, %xmm12       # qhasm: v01 = r4 & mask5
vpand %xmm5, %xmm8, %xmm8         # qhasm: v11 = r5 & mask5
psrlq $1, %xmm12                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm11, %xmm11       # qhasm: r4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8         # qhasm: r5 = v01 | v11

vpand %xmm4, %xmm6, %xmm12        # qhasm: v00 = r6 & mask4
vpand %xmm4, %xmm7, %xmm15        # qhasm: v10 = r7 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm6, %xmm6         # qhasm: v01 = r6 & mask5
vpand %xmm5, %xmm7, %xmm7         # qhasm: v11 = r7 & mask5
psrlq $1, %xmm6                   # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm12, %xmm12       # qhasm: r6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6          # qhasm: r7 = v01 | v11

vpunpcklqdq %xmm13, %xmm9, %xmm7  # qhasm: t0 = r0[0]r1[0]
movdqu %xmm7, 256(%rdi)           # qhasm: mem128[ input_0 + 256 ] = t0
vpunpcklqdq %xmm10, %xmm14, %xmm7 # qhasm: t0 = r2[0]r3[0]
movdqu %xmm7, 272(%rdi)           # qhasm: mem128[ input_0 + 272 ] = t0
vpunpcklqdq %xmm8, %xmm11, %xmm7  # qhasm: t0 = r4[0]r5[0]
movdqu %xmm7, 288(%rdi)           # qhasm: mem128[ input_0 + 288 ] = t0
vpunpcklqdq %xmm6, %xmm12, %xmm6  # qhasm: t0 = r6[0]r7[0]
movdqu %xmm6, 304(%rdi)           # qhasm: mem128[ input_0 + 304 ] = t0
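# Per the qhasm comments (t0 = rX[0]rY[0]), each vpunpcklqdq above packs the
# low 64-bit halves of two transposed results into one 128-bit value before
# the store, so only the low halves of the movddup-doubled registers are
# ever written back.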
movddup 320(%rdi), %xmm6          # qhasm: r0 = mem64[ input_0 + 320 ] x2
movddup 328(%rdi), %xmm7          # qhasm: r1 = mem64[ input_0 + 328 ] x2
movddup 336(%rdi), %xmm8          # qhasm: r2 = mem64[ input_0 + 336 ] x2
movddup 344(%rdi), %xmm9          # qhasm: r3 = mem64[ input_0 + 344 ] x2
movddup 352(%rdi), %xmm10         # qhasm: r4 = mem64[ input_0 + 352 ] x2
movddup 360(%rdi), %xmm11         # qhasm: r5 = mem64[ input_0 + 360 ] x2
movddup 368(%rdi), %xmm12         # qhasm: r6 = mem64[ input_0 + 368 ] x2
movddup 376(%rdi), %xmm13         # qhasm: r7 = mem64[ input_0 + 376 ] x2

vpand %xmm0, %xmm6, %xmm14        # qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm10, %xmm15       # qhasm: v10 = r4 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm6, %xmm6         # qhasm: v01 = r0 & mask1
vpand %xmm1, %xmm10, %xmm10       # qhasm: v11 = r4 & mask1
psrlq $4, %xmm6                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm14, %xmm14       # qhasm: r0 = v00 | v10
vpor %xmm10, %xmm6, %xmm6         # qhasm: r4 = v01 | v11

vpand %xmm0, %xmm7, %xmm10        # qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm11, %xmm15       # qhasm: v10 = r5 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm7, %xmm7         # qhasm: v01 = r1 & mask1
vpand %xmm1, %xmm11, %xmm11       # qhasm: v11 = r5 & mask1
psrlq $4, %xmm7                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm10, %xmm10       # qhasm: r1 = v00 | v10
vpor %xmm11, %xmm7, %xmm7         # qhasm: r5 = v01 | v11

vpand %xmm0, %xmm8, %xmm11        # qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm12, %xmm15       # qhasm: v10 = r6 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm8, %xmm8         # qhasm: v01 = r2 & mask1
vpand %xmm1, %xmm12, %xmm12       # qhasm: v11 = r6 & mask1
psrlq $4, %xmm8                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm11, %xmm11       # qhasm: r2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8         # qhasm: r6 = v01 | v11

vpand %xmm0, %xmm9, %xmm12        # qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm13, %xmm15       # qhasm: v10 = r7 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm9, %xmm9         # qhasm: v01 = r3 & mask1
vpand %xmm1, %xmm13, %xmm13       # qhasm: v11 = r7 & mask1
psrlq $4, %xmm9                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm12, %xmm12       # qhasm: r3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9         # qhasm: r7 = v01 | v11

vpand %xmm2, %xmm14, %xmm13       # qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm11, %xmm15       # qhasm: v10 = r2 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm14, %xmm14       # qhasm: v01 = r0 & mask3
vpand %xmm3, %xmm11, %xmm11       # qhasm: v11 = r2 & mask3
psrlq $2, %xmm14                  # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm13, %xmm13       # qhasm: r0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11       # qhasm: r2 = v01 | v11

vpand %xmm2, %xmm10, %xmm14       # qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm12, %xmm15       # qhasm: v10 = r3 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm10, %xmm10       # qhasm: v01 = r1 & mask3
vpand %xmm3, %xmm12, %xmm12       # qhasm: v11 = r3 & mask3
psrlq $2, %xmm10                  # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm14, %xmm14       # qhasm: r1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10       # qhasm: r3 = v01 | v11

vpand %xmm2, %xmm6, %xmm12        # qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm8, %xmm15        # qhasm: v10 = r6 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm6, %xmm6         # qhasm: v01 = r4 & mask3
vpand %xmm3, %xmm8, %xmm8         # qhasm: v11 = r6 & mask3
psrlq $2, %xmm6                   # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm12, %xmm12       # qhasm: r4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6          # qhasm: r6 = v01 | v11

vpand %xmm2, %xmm7, %xmm8         # qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm9, %xmm15        # qhasm: v10 = r7 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm7, %xmm7         # qhasm: v01 = r5 & mask3
vpand %xmm3, %xmm9, %xmm9         # qhasm: v11 = r7 & mask3
psrlq $2, %xmm7                   # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm8, %xmm8         # qhasm: r5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7          # qhasm: r7 = v01 | v11

vpand %xmm4, %xmm13, %xmm9        # qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm14, %xmm15       # qhasm: v10 = r1 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm13, %xmm13       # qhasm: v01 = r0 & mask5
vpand %xmm5, %xmm14, %xmm14       # qhasm: v11 = r1 & mask5
psrlq $1, %xmm13                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm9, %xmm9         # qhasm: r0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13       # qhasm: r1 = v01 | v11

vpand %xmm4, %xmm11, %xmm14       # qhasm: v00 = r2 & mask4
vpand %xmm4, %xmm10, %xmm15       # qhasm: v10 = r3 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm11, %xmm11       # qhasm: v01 = r2 & mask5
vpand %xmm5, %xmm10, %xmm10       # qhasm: v11 = r3 & mask5
psrlq $1, %xmm11                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm14, %xmm14       # qhasm: r2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10       # qhasm: r3 = v01 | v11

vpand %xmm4, %xmm12, %xmm11       # qhasm: v00 = r4 & mask4
vpand %xmm4, %xmm8, %xmm15        # qhasm: v10 = r5 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm12, %xmm12       # qhasm: v01 = r4 & mask5
vpand %xmm5, %xmm8, %xmm8         # qhasm: v11 = r5 & mask5
psrlq $1, %xmm12                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm11, %xmm11       # qhasm: r4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8         # qhasm: r5 = v01 | v11

vpand %xmm4, %xmm6, %xmm12        # qhasm: v00 = r6 & mask4
vpand %xmm4, %xmm7, %xmm15        # qhasm: v10 = r7 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm6, %xmm6         # qhasm: v01 = r6 & mask5
vpand %xmm5, %xmm7, %xmm7         # qhasm: v11 = r7 & mask5
psrlq $1, %xmm6                   # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm12, %xmm12       # qhasm: r6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6          # qhasm: r7 = v01 | v11

vpunpcklqdq %xmm13, %xmm9, %xmm7  # qhasm: t0 = r0[0]r1[0]
movdqu %xmm7, 320(%rdi)           # qhasm: mem128[ input_0 + 320 ] = t0
vpunpcklqdq %xmm10, %xmm14, %xmm7 # qhasm: t0 = r2[0]r3[0]
movdqu %xmm7, 336(%rdi)           # qhasm: mem128[ input_0 + 336 ] = t0
vpunpcklqdq %xmm8, %xmm11, %xmm7  # qhasm: t0 = r4[0]r5[0]
movdqu %xmm7, 352(%rdi)           # qhasm: mem128[ input_0 + 352 ] = t0
vpunpcklqdq %xmm6, %xmm12, %xmm6  # qhasm: t0 = r6[0]r7[0]
movdqu %xmm6, 368(%rdi)           # qhasm: mem128[ input_0 + 368 ] = t0
movddup 384(%rdi), %xmm6          # qhasm: r0 = mem64[ input_0 + 384 ] x2
movddup 392(%rdi), %xmm7          # qhasm: r1 = mem64[ input_0 + 392 ] x2
movddup 400(%rdi), %xmm8          # qhasm: r2 = mem64[ input_0 + 400 ] x2
movddup 408(%rdi), %xmm9          # qhasm: r3 = mem64[ input_0 + 408 ] x2
movddup 416(%rdi), %xmm10         # qhasm: r4 = mem64[ input_0 + 416 ] x2
movddup 424(%rdi), %xmm11         # qhasm: r5 = mem64[ input_0 + 424 ] x2
movddup 432(%rdi), %xmm12         # qhasm: r6 = mem64[ input_0 + 432 ] x2
movddup 440(%rdi), %xmm13         # qhasm: r7 = mem64[ input_0 + 440 ] x2

vpand %xmm0, %xmm6, %xmm14        # qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm10, %xmm15       # qhasm: v10 = r4 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm6, %xmm6         # qhasm: v01 = r0 & mask1
vpand %xmm1, %xmm10, %xmm10       # qhasm: v11 = r4 & mask1
psrlq $4, %xmm6                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm14, %xmm14       # qhasm: r0 = v00 | v10
vpor %xmm10, %xmm6, %xmm6         # qhasm: r4 = v01 | v11

vpand %xmm0, %xmm7, %xmm10        # qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm11, %xmm15       # qhasm: v10 = r5 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm7, %xmm7         # qhasm: v01 = r1 & mask1
vpand %xmm1, %xmm11, %xmm11       # qhasm: v11 = r5 & mask1
psrlq $4, %xmm7                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm10, %xmm10       # qhasm: r1 = v00 | v10
vpor %xmm11, %xmm7, %xmm7         # qhasm: r5 = v01 | v11

vpand %xmm0, %xmm8, %xmm11        # qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm12, %xmm15       # qhasm: v10 = r6 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm8, %xmm8         # qhasm: v01 = r2 & mask1
vpand %xmm1, %xmm12, %xmm12       # qhasm: v11 = r6 & mask1
psrlq $4, %xmm8                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm11, %xmm11       # qhasm: r2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8         # qhasm: r6 = v01 | v11

vpand %xmm0, %xmm9, %xmm12        # qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm13, %xmm15       # qhasm: v10 = r7 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm9, %xmm9         # qhasm: v01 = r3 & mask1
vpand %xmm1, %xmm13, %xmm13       # qhasm: v11 = r7 & mask1
psrlq $4, %xmm9                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm12, %xmm12       # qhasm: r3 = v00 | v10
vpor %xmm13, %xmm9, %xmm9         # qhasm: r7 = v01 | v11

vpand %xmm2, %xmm14, %xmm13       # qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm11, %xmm15       # qhasm: v10 = r2 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm14, %xmm14       # qhasm: v01 = r0 & mask3
vpand %xmm3, %xmm11, %xmm11       # qhasm: v11 = r2 & mask3
psrlq $2, %xmm14                  # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm13, %xmm13       # qhasm: r0 = v00 | v10
vpor %xmm11, %xmm14, %xmm11       # qhasm: r2 = v01 | v11

vpand %xmm2, %xmm10, %xmm14       # qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm12, %xmm15       # qhasm: v10 = r3 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm10, %xmm10       # qhasm: v01 = r1 & mask3
vpand %xmm3, %xmm12, %xmm12       # qhasm: v11 = r3 & mask3
psrlq $2, %xmm10                  # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm14, %xmm14       # qhasm: r1 = v00 | v10
vpor %xmm12, %xmm10, %xmm10       # qhasm: r3 = v01 | v11

vpand %xmm2, %xmm6, %xmm12        # qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm8, %xmm15        # qhasm: v10 = r6 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm6, %xmm6         # qhasm: v01 = r4 & mask3
vpand %xmm3, %xmm8, %xmm8         # qhasm: v11 = r6 & mask3
psrlq $2, %xmm6                   # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm12, %xmm12       # qhasm: r4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6          # qhasm: r6 = v01 | v11

vpand %xmm2, %xmm7, %xmm8         # qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm9, %xmm15        # qhasm: v10 = r7 & mask2
psllq $2, %xmm15                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm7, %xmm7         # qhasm: v01 = r5 & mask3
vpand %xmm3, %xmm9, %xmm9         # qhasm: v11 = r7 & mask3
psrlq $2, %xmm7                   # qhasm: 2x v01 unsigned>>= 2
vpor %xmm15, %xmm8, %xmm8         # qhasm: r5 = v00 | v10
vpor %xmm9, %xmm7, %xmm7          # qhasm: r7 = v01 | v11

vpand %xmm4, %xmm13, %xmm9        # qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm14, %xmm15       # qhasm: v10 = r1 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm13, %xmm13       # qhasm: v01 = r0 & mask5
vpand %xmm5, %xmm14, %xmm14       # qhasm: v11 = r1 & mask5
psrlq $1, %xmm13                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm9, %xmm9         # qhasm: r0 = v00 | v10
vpor %xmm14, %xmm13, %xmm13       # qhasm: r1 = v01 | v11

vpand %xmm4, %xmm11, %xmm14       # qhasm: v00 = r2 & mask4
vpand %xmm4, %xmm10, %xmm15       # qhasm: v10 = r3 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm11, %xmm11       # qhasm: v01 = r2 & mask5
vpand %xmm5, %xmm10, %xmm10       # qhasm: v11 = r3 & mask5
psrlq $1, %xmm11                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm14, %xmm14       # qhasm: r2 = v00 | v10
vpor %xmm10, %xmm11, %xmm10       # qhasm: r3 = v01 | v11

vpand %xmm4, %xmm12, %xmm11       # qhasm: v00 = r4 & mask4
vpand %xmm4, %xmm8, %xmm15        # qhasm: v10 = r5 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm12, %xmm12       # qhasm: v01 = r4 & mask5
vpand %xmm5, %xmm8, %xmm8         # qhasm: v11 = r5 & mask5
psrlq $1, %xmm12                  # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm11, %xmm11       # qhasm: r4 = v00 | v10
vpor %xmm8, %xmm12, %xmm8         # qhasm: r5 = v01 | v11

vpand %xmm4, %xmm6, %xmm12        # qhasm: v00 = r6 & mask4
vpand %xmm4, %xmm7, %xmm15        # qhasm: v10 = r7 & mask4
psllq $1, %xmm15                  # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm6, %xmm6         # qhasm: v01 = r6 & mask5
vpand %xmm5, %xmm7, %xmm7         # qhasm: v11 = r7 & mask5
psrlq $1, %xmm6                   # qhasm: 2x v01 unsigned>>= 1
vpor %xmm15, %xmm12, %xmm12       # qhasm: r6 = v00 | v10
vpor %xmm7, %xmm6, %xmm6          # qhasm: r7 = v01 | v11

vpunpcklqdq %xmm13, %xmm9, %xmm7  # qhasm: t0 = r0[0]r1[0]
movdqu %xmm7, 384(%rdi)           # qhasm: mem128[ input_0 + 384 ] = t0
vpunpcklqdq %xmm10, %xmm14, %xmm7 # qhasm: t0 = r2[0]r3[0]
movdqu %xmm7, 400(%rdi)           # qhasm: mem128[ input_0 + 400 ] = t0
vpunpcklqdq %xmm8, %xmm11, %xmm7  # qhasm: t0 = r4[0]r5[0]
movdqu %xmm7, 416(%rdi)           # qhasm: mem128[ input_0 + 416 ] = t0
vpunpcklqdq %xmm6, %xmm12, %xmm6  # qhasm: t0 = r6[0]r7[0]
movdqu %xmm6, 432(%rdi)           # qhasm: mem128[ input_0 + 432 ] = t0
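# What appears to be the final 64-byte block of this pass (offsets 448-504)
# follows. The mask registers are apparently dead after their last use here,
# so the allocator recycles %xmm0/%xmm1 (mask0/mask1), and later %xmm2 and
# %xmm3, as scratch; that is why the register choices below diverge from the
# earlier, otherwise identical blocks.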
movddup 448(%rdi), %xmm6          # qhasm: r0 = mem64[ input_0 + 448 ] x2
movddup 456(%rdi), %xmm7          # qhasm: r1 = mem64[ input_0 + 456 ] x2
movddup 464(%rdi), %xmm8          # qhasm: r2 = mem64[ input_0 + 464 ] x2
movddup 472(%rdi), %xmm9          # qhasm: r3 = mem64[ input_0 + 472 ] x2
movddup 480(%rdi), %xmm10         # qhasm: r4 = mem64[ input_0 + 480 ] x2
movddup 488(%rdi), %xmm11         # qhasm: r5 = mem64[ input_0 + 488 ] x2
movddup 496(%rdi), %xmm12         # qhasm: r6 = mem64[ input_0 + 496 ] x2
movddup 504(%rdi), %xmm13         # qhasm: r7 = mem64[ input_0 + 504 ] x2

vpand %xmm0, %xmm6, %xmm14        # qhasm: v00 = r0 & mask0
vpand %xmm0, %xmm10, %xmm15       # qhasm: v10 = r4 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm6, %xmm6         # qhasm: v01 = r0 & mask1
vpand %xmm1, %xmm10, %xmm10       # qhasm: v11 = r4 & mask1
psrlq $4, %xmm6                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm14, %xmm14       # qhasm: r0 = v00 | v10
vpor %xmm10, %xmm6, %xmm6         # qhasm: r4 = v01 | v11

vpand %xmm0, %xmm7, %xmm10        # qhasm: v00 = r1 & mask0
vpand %xmm0, %xmm11, %xmm15       # qhasm: v10 = r5 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm7, %xmm7         # qhasm: v01 = r1 & mask1
vpand %xmm1, %xmm11, %xmm11       # qhasm: v11 = r5 & mask1
psrlq $4, %xmm7                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm10, %xmm10       # qhasm: r1 = v00 | v10
vpor %xmm11, %xmm7, %xmm7         # qhasm: r5 = v01 | v11

vpand %xmm0, %xmm8, %xmm11        # qhasm: v00 = r2 & mask0
vpand %xmm0, %xmm12, %xmm15       # qhasm: v10 = r6 & mask0
psllq $4, %xmm15                  # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm8, %xmm8         # qhasm: v01 = r2 & mask1
vpand %xmm1, %xmm12, %xmm12       # qhasm: v11 = r6 & mask1
psrlq $4, %xmm8                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm15, %xmm11, %xmm11       # qhasm: r2 = v00 | v10
vpor %xmm12, %xmm8, %xmm8         # qhasm: r6 = v01 | v11

vpand %xmm0, %xmm9, %xmm12        # qhasm: v00 = r3 & mask0
vpand %xmm0, %xmm13, %xmm0        # qhasm: v10 = r7 & mask0
psllq $4, %xmm0                   # qhasm: 2x v10 <<= 4
vpand %xmm1, %xmm9, %xmm9         # qhasm: v01 = r3 & mask1
vpand %xmm1, %xmm13, %xmm1        # qhasm: v11 = r7 & mask1
psrlq $4, %xmm9                   # qhasm: 2x v01 unsigned>>= 4
vpor %xmm0, %xmm12, %xmm0         # qhasm: r3 = v00 | v10
vpor %xmm1, %xmm9, %xmm1          # qhasm: r7 = v01 | v11

vpand %xmm2, %xmm14, %xmm9        # qhasm: v00 = r0 & mask2
vpand %xmm2, %xmm11, %xmm12       # qhasm: v10 = r2 & mask2
psllq $2, %xmm12                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm14, %xmm13       # qhasm: v01 = r0 & mask3
vpand %xmm3, %xmm11, %xmm11       # qhasm: v11 = r2 & mask3
psrlq $2, %xmm13                  # qhasm: 2x v01 unsigned>>= 2
vpor %xmm12, %xmm9, %xmm9         # qhasm: r0 = v00 | v10
vpor %xmm11, %xmm13, %xmm11       # qhasm: r2 = v01 | v11

vpand %xmm2, %xmm10, %xmm12       # qhasm: v00 = r1 & mask2
vpand %xmm2, %xmm0, %xmm13        # qhasm: v10 = r3 & mask2
psllq $2, %xmm13                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm10, %xmm10       # qhasm: v01 = r1 & mask3
vpand %xmm3, %xmm0, %xmm0         # qhasm: v11 = r3 & mask3
psrlq $2, %xmm10                  # qhasm: 2x v01 unsigned>>= 2
vpor %xmm13, %xmm12, %xmm12       # qhasm: r1 = v00 | v10
vpor %xmm0, %xmm10, %xmm0         # qhasm: r3 = v01 | v11

vpand %xmm2, %xmm6, %xmm10        # qhasm: v00 = r4 & mask2
vpand %xmm2, %xmm8, %xmm13        # qhasm: v10 = r6 & mask2
psllq $2, %xmm13                  # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm6, %xmm6         # qhasm: v01 = r4 & mask3
vpand %xmm3, %xmm8, %xmm8         # qhasm: v11 = r6 & mask3
psrlq $2, %xmm6                   # qhasm: 2x v01 unsigned>>= 2
vpor %xmm13, %xmm10, %xmm10       # qhasm: r4 = v00 | v10
vpor %xmm8, %xmm6, %xmm6          # qhasm: r6 = v01 | v11

vpand %xmm2, %xmm7, %xmm8         # qhasm: v00 = r5 & mask2
vpand %xmm2, %xmm1, %xmm2         # qhasm: v10 = r7 & mask2
psllq $2, %xmm2                   # qhasm: 2x v10 <<= 2
vpand %xmm3, %xmm7, %xmm7         # qhasm: v01 = r5 & mask3
vpand %xmm3, %xmm1, %xmm1         # qhasm: v11 = r7 & mask3
psrlq $2, %xmm7                   # qhasm: 2x v01 unsigned>>= 2
vpor %xmm2, %xmm8, %xmm2          # qhasm: r5 = v00 | v10
vpor %xmm1, %xmm7, %xmm1          # qhasm: r7 = v01 | v11

vpand %xmm4, %xmm9, %xmm3         # qhasm: v00 = r0 & mask4
vpand %xmm4, %xmm12, %xmm7        # qhasm: v10 = r1 & mask4
psllq $1, %xmm7                   # qhasm: 2x v10 <<= 1
vpand %xmm5, %xmm9, %xmm8         # qhasm: v01 = r0 & mask5
vpand %xmm5, %xmm12, %xmm9        # qhasm: v11 = r1 & mask5
psrlq $1, %xmm8                   # qhasm: 2x v01 unsigned>>= 1
vpor %xmm7, %xmm3, %xmm3          # qhasm: r0 = v00 | v10
# qhasm: r1 = v01 | v11
vpor %
xmm9, % xmm8, % xmm7 # qhasm: v00 = r2 & mask4 # asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = r3 & mask4 # asm 1: vpand <mask4=reg128#5,<r3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<r3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = r2 & mask5 # asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = r3 & mask5 # asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: r2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: r3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = r4 & mask4 # asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = r5 & mask4 # asm 1: vpand <mask4=reg128#5,<r5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<r5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = r4 & mask5 # asm 1: vpand <mask5=reg128#6,<r4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<r4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = r5 & mask5 # asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: r4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: r5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = r6 & mask4 # asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = r7 & mask4 # asm 1: vpand <mask4=reg128#5,<r7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<r7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = r6 & mask5 # asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = r7 & mask5 # asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: r6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>r6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>r6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: r7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1 vpor % xmm1, % xmm6, % 
xmm1 # qhasm: t0 = r0[0]r1[0] # asm 1: vpunpcklqdq <r1=reg128#8,<r0=reg128#4,>t0=reg128#4 # asm 2: vpunpcklqdq <r1=%xmm7,<r0=%xmm3,>t0=%xmm3 vpunpcklqdq % xmm7, % xmm3, % xmm3 # qhasm: mem128[ input_0 + 448 ] = t0 # asm 1: movdqu <t0=reg128#4,448(<input_0=int64#1) # asm 2: movdqu <t0=%xmm3,448(<input_0=%rdi) movdqu % xmm3, 448( % rdi) # qhasm: t0 = r2[0]r3[0] # asm 1: vpunpcklqdq <r3=reg128#1,<r2=reg128#9,>t0=reg128#1 # asm 2: vpunpcklqdq <r3=%xmm0,<r2=%xmm8,>t0=%xmm0 vpunpcklqdq % xmm0, % xmm8, % xmm0 # qhasm: mem128[ input_0 + 464 ] = t0 # asm 1: movdqu <t0=reg128#1,464(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,464(<input_0=%rdi) movdqu % xmm0, 464( % rdi) # qhasm: t0 = r4[0]r5[0] # asm 1: vpunpcklqdq <r5=reg128#3,<r4=reg128#10,>t0=reg128#1 # asm 2: vpunpcklqdq <r5=%xmm2,<r4=%xmm9,>t0=%xmm0 vpunpcklqdq % xmm2, % xmm9, % xmm0 # qhasm: mem128[ input_0 + 480 ] = t0 # asm 1: movdqu <t0=reg128#1,480(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,480(<input_0=%rdi) movdqu % xmm0, 480( % rdi) # qhasm: t0 = r6[0]r7[0] # asm 1: vpunpcklqdq <r7=reg128#2,<r6=reg128#5,>t0=reg128#1 # asm 2: vpunpcklqdq <r7=%xmm1,<r6=%xmm4,>t0=%xmm0 vpunpcklqdq % xmm1, % xmm4, % xmm0 # qhasm: mem128[ input_0 + 496 ] = t0 # asm 1: movdqu <t0=reg128#1,496(<input_0=int64#1) # asm 2: movdqu <t0=%xmm0,496(<input_0=%rdi) movdqu % xmm0, 496( % rdi) # qhasm: return add % r11, % rsp ret
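The routine that ends here is built from one repeated primitive: a masked exchange (a "delta swap") that keeps the bits selected by one mask in place, pulls the complementary bits across from a partner register shifted by 1, 2, or 4, and recombines the halves with vpor; movddup loads splat one 64-bit word into both lanes of an xmm register, and vpunpcklqdq packs the low 64-bit lanes of register pairs back to memory. A minimal scalar C model of one exchange step, mirroring the qhasm dataflow above (function and variable names are mine, not from this code):

#include <stdint.h>

/* One masked exchange step, modeled on the qhasm pattern:
   v00 = lo & mask_lo;          bits of lo that stay put
   v10 = (hi & mask_lo) << s;   bits of hi moving into lo
   v01 = (lo & mask_hi) >> s;   bits of lo moving into hi
   v11 = hi & mask_hi;          bits of hi that stay put
   All four terms are computed before either register is rewritten,
   exactly as the assembly does by using scratch registers. */
static void exchange_step(uint64_t *lo, uint64_t *hi,
                          uint64_t mask_lo, uint64_t mask_hi, int s)
{
    uint64_t v00 = *lo & mask_lo;
    uint64_t v10 = (*hi & mask_lo) << s;   /* vpand + psllq */
    uint64_t v01 = (*lo & mask_hi) >> s;   /* vpand + psrlq */
    uint64_t v11 = *hi & mask_hi;
    *lo = v00 | v10;                       /* vpor */
    *hi = v01 | v11;                       /* vpor */
}

The AVX2 version runs this identical dataflow on xmm registers, i.e. on two 64-bit lanes per instruction.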
mktmansour/MKT-KSA-Geolocation-Security
2,712
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128f/avx2/consts.S
#include "namespace.h" #if defined(__APPLE__) #define ASM_HIDDEN .private_extern #else #define ASM_HIDDEN .hidden #endif #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) .data ASM_HIDDEN MASK0_0 ASM_HIDDEN MASK0_1 ASM_HIDDEN MASK1_0 ASM_HIDDEN MASK1_1 ASM_HIDDEN MASK2_0 ASM_HIDDEN MASK2_1 ASM_HIDDEN MASK3_0 ASM_HIDDEN MASK3_1 ASM_HIDDEN MASK4_0 ASM_HIDDEN MASK4_1 ASM_HIDDEN MASK5_0 ASM_HIDDEN MASK5_1 .globl MASK0_0 .globl MASK0_1 .globl MASK1_0 .globl MASK1_1 .globl MASK2_0 .globl MASK2_1 .globl MASK3_0 .globl MASK3_1 .globl MASK4_0 .globl MASK4_1 .globl MASK5_0 .globl MASK5_1 .p2align 5 MASK0_0: .quad 0x5555555555555555, 0x5555555555555555, 0x5555555555555555, 0x5555555555555555 MASK0_1: .quad 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA MASK1_0: .quad 0x3333333333333333, 0x3333333333333333, 0x3333333333333333, 0x3333333333333333 MASK1_1: .quad 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC MASK2_0: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F MASK2_1: .quad 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0 MASK3_0: .quad 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF MASK3_1: .quad 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00 MASK4_0: .quad 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF MASK4_1: .quad 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000 MASK5_0: .quad 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF MASK5_1: .quad 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000
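Two invariants of this constant table are worth making explicit: each MASKk_1 is the bitwise complement of the matching MASKk_0 (pattern k keeps the low 2^k bits of every 2^(k+1)-bit group), and every constant repeats a single 64-bit pattern four times so that one aligned 256-bit load broadcasts it across all ymm lanes, which is what the .p2align 5 (32-byte) alignment serves. A small self-check in C, assuming nothing beyond the values printed above:

#include <assert.h>
#include <stdint.h>

/* The MASKk_0 / MASKk_1 patterns from the table above, one 64-bit lane each. */
static const uint64_t mask_lo[6] = {
    0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
    0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL,
};
static const uint64_t mask_hi[6] = {
    0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL, 0xF0F0F0F0F0F0F0F0ULL,
    0xFF00FF00FF00FF00ULL, 0xFFFF0000FFFF0000ULL, 0xFFFFFFFF00000000ULL,
};

int main(void) {
    for (int k = 0; k < 6; k++) {
        assert(mask_hi[k] == ~mask_lo[k]);             /* complementary halves   */
        assert(mask_lo[k] == mask_hi[k] >> (1u << k)); /* same pattern, 2^k apart */
    }
    return 0;
}

These complementary pairs are exactly the mask/shift inputs that the exchange step sketched earlier consumes.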
mktmansour/MKT-KSA-Geolocation-Security
14,915
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128f/avx2/update_asm.S
#include "namespace.h" #define update_asm CRYPTO_NAMESPACE(update_asm) #define _update_asm _CRYPTO_NAMESPACE(update_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 s0 # qhasm: int64 s1 # qhasm: int64 s2 # qhasm: enter update_asm .p2align 5 .global _update_asm .global update_asm _update_asm: update_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: s2 = input_1 # asm 1: mov <input_1=int64#2,>s2=int64#2 # asm 2: mov <input_1=%rsi,>s2=%rsi mov % rsi, % rsi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ 
input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 
0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd 
$1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq <s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: s0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>s0=int64#4 # asm 2: movq 0(<input_0=%rdi),>s0=%rcx movq 0( % rdi), % rcx # qhasm: s1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>s1=int64#5 # asm 2: movq 8(<input_0=%rdi),>s1=%r8 movq 8( % rdi), % r8 # qhasm: s0 = (s1 s0) >> 1 # asm 1: shrd $1,<s1=int64#5,<s0=int64#4 # asm 2: shrd $1,<s1=%r8,<s0=%rcx shrd $1, % r8, % rcx # qhasm: s1 = (s2 s1) >> 1 # asm 1: shrd $1,<s2=int64#2,<s1=int64#5 # asm 2: shrd $1,<s2=%rsi,<s1=%r8 shrd $1, % rsi, % r8 # qhasm: (uint64) s2 >>= 1 # asm 1: shr $1,<s2=int64#2 # asm 2: shr $1,<s2=%rsi shr $1, % rsi # qhasm: mem64[ input_0 + 0 ] = s0 # asm 1: movq <s0=int64#4,0(<input_0=int64#1) # asm 2: movq <s0=%rcx,0(<input_0=%rdi) movq % rcx, 0( % rdi) # qhasm: mem64[ input_0 + 8 ] = s1 # asm 1: movq <s1=int64#5,8(<input_0=int64#1) # asm 2: movq 
<s1=%r8,8(<input_0=%rdi) movq % r8, 8( % rdi) # qhasm: input_0 += input_2 # asm 1: add <input_2=int64#3,<input_0=int64#1 # asm 2: add <input_2=%rdx,<input_0=%rdi add % rdx, % rdi # qhasm: return add % r11, % rsp ret
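Stripped of the unrolling, update_asm walks rows spaced input_2 bytes apart, each holding a 128-bit shift register as two little-endian 64-bit words: shrd $1 shifts the pair right by one bit, the low bit of input_1 drops into the vacated top position, and input_1 is then shifted so the next row receives the next bit. A hedged C model of one call (the names and the rows parameter are my own, not PQClean's; it relies on the x86 semantics shrd $1, src, dst == dst = (dst >> 1) | (src << 63)):

#include <stdint.h>
#include <string.h>

/* Model of the update step: `rows` bit-sliced rows, each a 128-bit shift
   register stored as two uint64_t words `stride` bytes apart; bit i of `e`
   enters the top of row i. */
static void update_model(unsigned char *buf, uint64_t e,
                         size_t stride, int rows)
{
    for (int i = 0; i < rows; i++) {
        uint64_t s0, s1;
        memcpy(&s0, buf,     8);
        memcpy(&s1, buf + 8, 8);
        s0 = (s0 >> 1) | (s1 << 63);  /* shrd $1, s1, s0 */
        s1 = (s1 >> 1) | (e  << 63);  /* shrd $1, e,  s1 */
        e >>= 1;                      /* shr  $1, e      */
        memcpy(buf,     &s0, 8);
        memcpy(buf + 8, &s1, 8);
        buf += stride;                /* add input_2     */
    }
}

Note that s0 is recomputed before s1 is overwritten, matching the instruction order in the assembly, so each row's low word always pulls from the old value of its high word.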
mktmansour/MKT-KSA-Geolocation-Security
53,565
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128f/avx2/vec128_mul_asm.S
#include "namespace.h" #define vec128_mul_asm CRYPTO_NAMESPACE(vec128_mul_asm) #define _vec128_mul_asm _CRYPTO_NAMESPACE(vec128_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 b2 # qhasm: reg256 b3 # qhasm: reg256 b4 # qhasm: reg256 b5 # qhasm: reg256 b6 # qhasm: reg256 b7 # qhasm: reg256 b8 # qhasm: reg256 b9 # qhasm: reg256 b10 # qhasm: reg256 b11 # qhasm: reg256 b12 # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: reg128 h0 # qhasm: reg128 h1 # qhasm: reg128 h2 # qhasm: reg128 h3 # qhasm: reg128 h4 # qhasm: reg128 h5 # qhasm: reg128 h6 # qhasm: reg128 h7 # qhasm: reg128 h8 # qhasm: reg128 h9 # qhasm: reg128 h10 # qhasm: reg128 h11 # qhasm: reg128 h12 # qhasm: reg128 h13 # qhasm: reg128 h14 # qhasm: reg128 h15 # qhasm: reg128 h16 # qhasm: reg128 h17 # qhasm: reg128 h18 # qhasm: reg128 h19 # qhasm: reg128 h20 # qhasm: reg128 h21 # qhasm: reg128 h22 # qhasm: reg128 h23 # qhasm: reg128 h24 # qhasm: stack4864 buf # qhasm: int64 ptr # qhasm: int64 tmp # qhasm: enter vec128_mul_asm .p2align 5 .global _vec128_mul_asm .global vec128_mul_asm _vec128_mul_asm: vec128_mul_asm: mov % rsp, % r11 and $31, % r11 add $608, % r11 sub % r11, % rsp # qhasm: ptr = &buf # asm 1: leaq <buf=stack4864#1,>ptr=int64#5 # asm 2: leaq <buf=0(%rsp),>ptr=%r8 leaq 0( % rsp), % r8 # qhasm: tmp = input_3 # asm 1: mov <input_3=int64#4,>tmp=int64#6 # asm 2: mov <input_3=%rcx,>tmp=%r9 mov % rcx, % r9 # qhasm: tmp *= 12 # asm 1: imulq $12,<tmp=int64#6,>tmp=int64#6 # asm 2: imulq $12,<tmp=%r9,>tmp=%r9 imulq $12, % r9, % r9 # qhasm: input_2 += tmp # asm 1: add <tmp=int64#6,<input_2=int64#3 # asm 2: add <tmp=%r9,<input_2=%rdx add % r9, % rdx # qhasm: b12 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b12=reg256#1 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b12=%ymm0 vbroadcasti128 0( % rdx), % ymm0 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: a6 = a6 ^ a6 # asm 1: vpxor <a6=reg256#2,<a6=reg256#2,>a6=reg256#2 # asm 2: vpxor <a6=%ymm1,<a6=%ymm1,>a6=%ymm1 vpxor % ymm1, % ymm1, % ymm1 # qhasm: a6[0] = mem128[ input_1 + 96 ] # asm 1: vinsertf128 $0x0,96(<input_1=int64#2),<a6=reg256#2,<a6=reg256#2 # asm 2: vinsertf128 $0x0,96(<input_1=%rsi),<a6=%ymm1,<a6=%ymm1 vinsertf128 $0x0, 96( % rsi), % ymm1, % ymm1 # qhasm: r18 = b12 & a6 # asm 1: vpand <b12=reg256#1,<a6=reg256#2,>r18=reg256#3 # asm 2: vpand <b12=%ymm0,<a6=%ymm1,>r18=%ymm2 vpand % ymm0, % ymm1, % ymm2 # qhasm: mem256[ ptr + 576 ] = r18 # asm 1: vmovupd <r18=reg256#3,576(<ptr=int64#5) # asm 2: 
vmovupd <r18=%ymm2,576(<ptr=%r8) vmovupd % ymm2, 576( % r8) # qhasm: a5[0] = mem128[ input_1 + 80 ] # asm 1: vinsertf128 $0x0,80(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x0,80(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x0, 80( % rsi), % ymm2, % ymm2 # qhasm: a5[1] = mem128[ input_1 + 192 ] # asm 1: vinsertf128 $0x1,192(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3 # asm 2: vinsertf128 $0x1,192(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2 vinsertf128 $0x1, 192( % rsi), % ymm2, % ymm2 # qhasm: r17 = b12 & a5 # asm 1: vpand <b12=reg256#1,<a5=reg256#3,>r17=reg256#4 # asm 2: vpand <b12=%ymm0,<a5=%ymm2,>r17=%ymm3 vpand % ymm0, % ymm2, % ymm3 # qhasm: a4[0] = mem128[ input_1 + 64 ] # asm 1: vinsertf128 $0x0,64(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x0,64(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x0, 64( % rsi), % ymm4, % ymm4 # qhasm: a4[1] = mem128[ input_1 + 176 ] # asm 1: vinsertf128 $0x1,176(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5 # asm 2: vinsertf128 $0x1,176(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4 vinsertf128 $0x1, 176( % rsi), % ymm4, % ymm4 # qhasm: r16 = b12 & a4 # asm 1: vpand <b12=reg256#1,<a4=reg256#5,>r16=reg256#6 # asm 2: vpand <b12=%ymm0,<a4=%ymm4,>r16=%ymm5 vpand % ymm0, % ymm4, % ymm5 # qhasm: a3[0] = mem128[ input_1 + 48 ] # asm 1: vinsertf128 $0x0,48(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x0,48(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x0, 48( % rsi), % ymm6, % ymm6 # qhasm: a3[1] = mem128[ input_1 + 160 ] # asm 1: vinsertf128 $0x1,160(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7 # asm 2: vinsertf128 $0x1,160(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6 vinsertf128 $0x1, 160( % rsi), % ymm6, % ymm6 # qhasm: r15 = b12 & a3 # asm 1: vpand <b12=reg256#1,<a3=reg256#7,>r15=reg256#8 # asm 2: vpand <b12=%ymm0,<a3=%ymm6,>r15=%ymm7 vpand % ymm0, % ymm6, % ymm7 # qhasm: a2[0] = mem128[ input_1 + 32 ] # asm 1: vinsertf128 $0x0,32(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x0,32(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x0, 32( % rsi), % ymm8, % ymm8 # qhasm: a2[1] = mem128[ input_1 + 144 ] # asm 1: vinsertf128 $0x1,144(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9 # asm 2: vinsertf128 $0x1,144(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8 vinsertf128 $0x1, 144( % rsi), % ymm8, % ymm8 # qhasm: r14 = b12 & a2 # asm 1: vpand <b12=reg256#1,<a2=reg256#9,>r14=reg256#10 # asm 2: vpand <b12=%ymm0,<a2=%ymm8,>r14=%ymm9 vpand % ymm0, % ymm8, % ymm9 # qhasm: a1[0] = mem128[ input_1 + 16 ] # asm 1: vinsertf128 $0x0,16(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x0,16(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x0, 16( % rsi), % ymm10, % ymm10 # qhasm: a1[1] = mem128[ input_1 + 128 ] # asm 1: vinsertf128 $0x1,128(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11 # asm 2: vinsertf128 $0x1,128(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10 vinsertf128 $0x1, 128( % rsi), % ymm10, % ymm10 # qhasm: r13 = b12 & a1 # asm 1: vpand <b12=reg256#1,<a1=reg256#11,>r13=reg256#12 # asm 2: vpand <b12=%ymm0,<a1=%ymm10,>r13=%ymm11 vpand % ymm0, % ymm10, % ymm11 # qhasm: a0[0] = mem128[ input_1 + 0 ] # asm 1: vinsertf128 $0x0,0(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x0,0(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x0, 0( % rsi), % ymm12, % ymm12 # qhasm: a0[1] = mem128[ input_1 + 112 ] # asm 1: vinsertf128 $0x1,112(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13 # asm 2: vinsertf128 $0x1,112(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12 vinsertf128 $0x1, 112( % rsi), % ymm12, % ymm12 
# qhasm: r12 = b12 & a0 # asm 1: vpand <b12=reg256#1,<a0=reg256#13,>r12=reg256#1 # asm 2: vpand <b12=%ymm0,<a0=%ymm12,>r12=%ymm0 vpand % ymm0, % ymm12, % ymm0 # qhasm: b11 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b11=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b11=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b11 & a6 # asm 1: vpand <b11=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b11=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#4,<r17=reg256#4 # asm 2: vpxor <r=%ymm14,<r17=%ymm3,<r17=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 544 ] = r17 # asm 1: vmovupd <r17=reg256#4,544(<ptr=int64#5) # asm 2: vmovupd <r17=%ymm3,544(<ptr=%r8) vmovupd % ymm3, 544( % r8) # qhasm: r = b11 & a5 # asm 1: vpand <b11=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#4,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm3,<r16=%ymm5,<r16=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b11 & a4 # asm 1: vpand <b11=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#4,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm3,<r15=%ymm7,<r15=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b11 & a3 # asm 1: vpand <b11=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#4,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm3,<r14=%ymm9,<r14=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b11 & a2 # asm 1: vpand <b11=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#4,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm3,<r13=%ymm11,<r13=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b11 & a1 # asm 1: vpand <b11=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b11=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#4,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm3,<r12=%ymm0,<r12=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r11 = b11 & a0 # asm 1: vpand <b11=reg256#14,<a0=reg256#13,>r11=reg256#4 # asm 2: vpand <b11=%ymm13,<a0=%ymm12,>r11=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b10 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b10=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b10=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b10 & a6 # asm 1: vpand <b10=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b10=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#15,<r16=reg256#6,<r16=reg256#6 # asm 2: vpxor <r=%ymm14,<r16=%ymm5,<r16=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 512 ] = r16 # asm 1: vmovupd <r16=reg256#6,512(<ptr=int64#5) # asm 2: vmovupd <r16=%ymm5,512(<ptr=%r8) vmovupd % ymm5, 512( % r8) # qhasm: r = b10 & a5 # asm 1: vpand <b10=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # 
qhasm: r15 ^= r # asm 1: vpxor <r=reg256#6,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm5,<r15=%ymm7,<r15=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b10 & a4 # asm 1: vpand <b10=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#6,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm5,<r14=%ymm9,<r14=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b10 & a3 # asm 1: vpand <b10=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#6,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm5,<r13=%ymm11,<r13=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b10 & a2 # asm 1: vpand <b10=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#6,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm5,<r12=%ymm0,<r12=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b10 & a1 # asm 1: vpand <b10=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b10=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#6,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm5,<r11=%ymm3,<r11=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r10 = b10 & a0 # asm 1: vpand <b10=reg256#14,<a0=reg256#13,>r10=reg256#6 # asm 2: vpand <b10=%ymm13,<a0=%ymm12,>r10=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b9 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b9=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b9=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b9 & a6 # asm 1: vpand <b9=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b9=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#8,<r15=reg256#8 # asm 2: vpxor <r=%ymm14,<r15=%ymm7,<r15=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 480 ] = r15 # asm 1: vmovupd <r15=reg256#8,480(<ptr=int64#5) # asm 2: vmovupd <r15=%ymm7,480(<ptr=%r8) vmovupd % ymm7, 480( % r8) # qhasm: r = b9 & a5 # asm 1: vpand <b9=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#8,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm7,<r14=%ymm9,<r14=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b9 & a4 # asm 1: vpand <b9=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#8,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm7,<r13=%ymm11,<r13=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b9 & a3 # asm 1: vpand <b9=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#8,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm7,<r12=%ymm0,<r12=%ymm0 vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b9 & a2 # asm 1: vpand <b9=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#8,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm7,<r11=%ymm3,<r11=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b9 & a1 # asm 1: vpand 
<b9=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b9=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#8,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm7,<r10=%ymm5,<r10=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r9 = b9 & a0 # asm 1: vpand <b9=reg256#14,<a0=reg256#13,>r9=reg256#8 # asm 2: vpand <b9=%ymm13,<a0=%ymm12,>r9=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b8 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b8=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b8=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b8 & a6 # asm 1: vpand <b8=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b8=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#10,<r14=reg256#10 # asm 2: vpxor <r=%ymm14,<r14=%ymm9,<r14=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 448 ] = r14 # asm 1: vmovupd <r14=reg256#10,448(<ptr=int64#5) # asm 2: vmovupd <r14=%ymm9,448(<ptr=%r8) vmovupd % ymm9, 448( % r8) # qhasm: r = b8 & a5 # asm 1: vpand <b8=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#10,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm9,<r13=%ymm11,<r13=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b8 & a4 # asm 1: vpand <b8=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#10,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm9,<r12=%ymm0,<r12=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b8 & a3 # asm 1: vpand <b8=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#10,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm9,<r11=%ymm3,<r11=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b8 & a2 # asm 1: vpand <b8=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#10,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm9,<r10=%ymm5,<r10=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b8 & a1 # asm 1: vpand <b8=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b8=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#10,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm9,<r9=%ymm7,<r9=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r8 = b8 & a0 # asm 1: vpand <b8=reg256#14,<a0=reg256#13,>r8=reg256#10 # asm 2: vpand <b8=%ymm13,<a0=%ymm12,>r8=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b7 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b7=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b7=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b7 & a6 # asm 1: vpand <b7=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b7=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#12,<r13=reg256#12 # asm 2: vpxor <r=%ymm14,<r13=%ymm11,<r13=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 416 ] = r13 # asm 1: vmovupd 
<r13=reg256#12,416(<ptr=int64#5) # asm 2: vmovupd <r13=%ymm11,416(<ptr=%r8) vmovupd % ymm11, 416( % r8) # qhasm: r = b7 & a5 # asm 1: vpand <b7=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#12,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm11,<r12=%ymm0,<r12=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b7 & a4 # asm 1: vpand <b7=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#12,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm11,<r11=%ymm3,<r11=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b7 & a3 # asm 1: vpand <b7=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#12,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm11,<r10=%ymm5,<r10=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b7 & a2 # asm 1: vpand <b7=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#12,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm11,<r9=%ymm7,<r9=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b7 & a1 # asm 1: vpand <b7=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b7=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#12,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm11,<r8=%ymm9,<r8=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r7 = b7 & a0 # asm 1: vpand <b7=reg256#14,<a0=reg256#13,>r7=reg256#12 # asm 2: vpand <b7=%ymm13,<a0=%ymm12,>r7=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b6 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b6=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b6=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b6 & a6 # asm 1: vpand <b6=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b6=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#15,<r12=reg256#1,<r12=reg256#1 # asm 2: vpxor <r=%ymm14,<r12=%ymm0,<r12=%ymm0 vpxor % ymm14, % ymm0, % ymm0 # qhasm: mem256[ ptr + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<ptr=int64#5) # asm 2: vmovupd <r12=%ymm0,384(<ptr=%r8) vmovupd % ymm0, 384( % r8) # qhasm: r = b6 & a5 # asm 1: vpand <b6=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm0,<r11=%ymm3,<r11=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b6 & a4 # asm 1: vpand <b6=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm0,<r10=%ymm5,<r10=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b6 & a3 # asm 1: vpand <b6=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a3=%ymm6,>r=%ymm0 vpand % ymm13, % ymm6, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm0,<r9=%ymm7,<r9=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b6 & a2 # asm 1: vpand <b6=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand 
<b6=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm0,<r8=%ymm9,<r8=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b6 & a1 # asm 1: vpand <b6=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b6=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm0,<r7=%ymm11,<r7=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r6 = b6 & a0 # asm 1: vpand <b6=reg256#14,<a0=reg256#13,>r6=reg256#1 # asm 2: vpand <b6=%ymm13,<a0=%ymm12,>r6=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: b5 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b5=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b5=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b5 & a6 # asm 1: vpand <b5=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b5=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#15,<r11=reg256#4,<r11=reg256#4 # asm 2: vpxor <r=%ymm14,<r11=%ymm3,<r11=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: mem256[ ptr + 352 ] = r11 # asm 1: vmovupd <r11=reg256#4,352(<ptr=int64#5) # asm 2: vmovupd <r11=%ymm3,352(<ptr=%r8) vmovupd % ymm3, 352( % r8) # qhasm: r = b5 & a5 # asm 1: vpand <b5=reg256#14,<a5=reg256#3,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a5=%ymm2,>r=%ymm3 vpand % ymm13, % ymm2, % ymm3 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#4,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm3,<r10=%ymm5,<r10=%ymm5 vpxor % ymm3, % ymm5, % ymm5 # qhasm: r = b5 & a4 # asm 1: vpand <b5=reg256#14,<a4=reg256#5,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a4=%ymm4,>r=%ymm3 vpand % ymm13, % ymm4, % ymm3 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#4,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm3,<r9=%ymm7,<r9=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r = b5 & a3 # asm 1: vpand <b5=reg256#14,<a3=reg256#7,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a3=%ymm6,>r=%ymm3 vpand % ymm13, % ymm6, % ymm3 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#4,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm3,<r8=%ymm9,<r8=%ymm9 vpxor % ymm3, % ymm9, % ymm9 # qhasm: r = b5 & a2 # asm 1: vpand <b5=reg256#14,<a2=reg256#9,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a2=%ymm8,>r=%ymm3 vpand % ymm13, % ymm8, % ymm3 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#4,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm3,<r7=%ymm11,<r7=%ymm11 vpxor % ymm3, % ymm11, % ymm11 # qhasm: r = b5 & a1 # asm 1: vpand <b5=reg256#14,<a1=reg256#11,>r=reg256#4 # asm 2: vpand <b5=%ymm13,<a1=%ymm10,>r=%ymm3 vpand % ymm13, % ymm10, % ymm3 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#4,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm3,<r6=%ymm0,<r6=%ymm0 vpxor % ymm3, % ymm0, % ymm0 # qhasm: r5 = b5 & a0 # asm 1: vpand <b5=reg256#14,<a0=reg256#13,>r5=reg256#4 # asm 2: vpand <b5=%ymm13,<a0=%ymm12,>r5=%ymm3 vpand % ymm13, % ymm12, % ymm3 # qhasm: b4 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b4=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b4=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b4 & a6 # asm 1: vpand <b4=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b4=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: 
r10 ^= r # asm 1: vpxor <r=reg256#15,<r10=reg256#6,<r10=reg256#6 # asm 2: vpxor <r=%ymm14,<r10=%ymm5,<r10=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: mem256[ ptr + 320 ] = r10 # asm 1: vmovupd <r10=reg256#6,320(<ptr=int64#5) # asm 2: vmovupd <r10=%ymm5,320(<ptr=%r8) vmovupd % ymm5, 320( % r8) # qhasm: r = b4 & a5 # asm 1: vpand <b4=reg256#14,<a5=reg256#3,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a5=%ymm2,>r=%ymm5 vpand % ymm13, % ymm2, % ymm5 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#6,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm5,<r9=%ymm7,<r9=%ymm7 vpxor % ymm5, % ymm7, % ymm7 # qhasm: r = b4 & a4 # asm 1: vpand <b4=reg256#14,<a4=reg256#5,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a4=%ymm4,>r=%ymm5 vpand % ymm13, % ymm4, % ymm5 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#6,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm5,<r8=%ymm9,<r8=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r = b4 & a3 # asm 1: vpand <b4=reg256#14,<a3=reg256#7,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a3=%ymm6,>r=%ymm5 vpand % ymm13, % ymm6, % ymm5 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#6,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm5,<r7=%ymm11,<r7=%ymm11 vpxor % ymm5, % ymm11, % ymm11 # qhasm: r = b4 & a2 # asm 1: vpand <b4=reg256#14,<a2=reg256#9,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a2=%ymm8,>r=%ymm5 vpand % ymm13, % ymm8, % ymm5 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#6,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm5,<r6=%ymm0,<r6=%ymm0 vpxor % ymm5, % ymm0, % ymm0 # qhasm: r = b4 & a1 # asm 1: vpand <b4=reg256#14,<a1=reg256#11,>r=reg256#6 # asm 2: vpand <b4=%ymm13,<a1=%ymm10,>r=%ymm5 vpand % ymm13, % ymm10, % ymm5 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#6,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm5,<r5=%ymm3,<r5=%ymm3 vpxor % ymm5, % ymm3, % ymm3 # qhasm: r4 = b4 & a0 # asm 1: vpand <b4=reg256#14,<a0=reg256#13,>r4=reg256#6 # asm 2: vpand <b4=%ymm13,<a0=%ymm12,>r4=%ymm5 vpand % ymm13, % ymm12, % ymm5 # qhasm: b3 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b3=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b3=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b3 & a6 # asm 1: vpand <b3=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b3=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#15,<r9=reg256#8,<r9=reg256#8 # asm 2: vpxor <r=%ymm14,<r9=%ymm7,<r9=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: mem256[ ptr + 288 ] = r9 # asm 1: vmovupd <r9=reg256#8,288(<ptr=int64#5) # asm 2: vmovupd <r9=%ymm7,288(<ptr=%r8) vmovupd % ymm7, 288( % r8) # qhasm: r = b3 & a5 # asm 1: vpand <b3=reg256#14,<a5=reg256#3,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a5=%ymm2,>r=%ymm7 vpand % ymm13, % ymm2, % ymm7 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#8,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm7,<r8=%ymm9,<r8=%ymm9 vpxor % ymm7, % ymm9, % ymm9 # qhasm: r = b3 & a4 # asm 1: vpand <b3=reg256#14,<a4=reg256#5,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a4=%ymm4,>r=%ymm7 vpand % ymm13, % ymm4, % ymm7 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#8,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm7,<r7=%ymm11,<r7=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r = b3 & a3 # asm 1: vpand <b3=reg256#14,<a3=reg256#7,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a3=%ymm6,>r=%ymm7 vpand % ymm13, % ymm6, % ymm7 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#8,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm7,<r6=%ymm0,<r6=%ymm0 
vpxor % ymm7, % ymm0, % ymm0 # qhasm: r = b3 & a2 # asm 1: vpand <b3=reg256#14,<a2=reg256#9,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a2=%ymm8,>r=%ymm7 vpand % ymm13, % ymm8, % ymm7 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#8,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm7,<r5=%ymm3,<r5=%ymm3 vpxor % ymm7, % ymm3, % ymm3 # qhasm: r = b3 & a1 # asm 1: vpand <b3=reg256#14,<a1=reg256#11,>r=reg256#8 # asm 2: vpand <b3=%ymm13,<a1=%ymm10,>r=%ymm7 vpand % ymm13, % ymm10, % ymm7 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#8,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm7,<r4=%ymm5,<r4=%ymm5 vpxor % ymm7, % ymm5, % ymm5 # qhasm: r3 = b3 & a0 # asm 1: vpand <b3=reg256#14,<a0=reg256#13,>r3=reg256#8 # asm 2: vpand <b3=%ymm13,<a0=%ymm12,>r3=%ymm7 vpand % ymm13, % ymm12, % ymm7 # qhasm: b2 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b2=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b2=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b2 & a6 # asm 1: vpand <b2=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b2=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#15,<r8=reg256#10,<r8=reg256#10 # asm 2: vpxor <r=%ymm14,<r8=%ymm9,<r8=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: mem256[ ptr + 256 ] = r8 # asm 1: vmovupd <r8=reg256#10,256(<ptr=int64#5) # asm 2: vmovupd <r8=%ymm9,256(<ptr=%r8) vmovupd % ymm9, 256( % r8) # qhasm: r = b2 & a5 # asm 1: vpand <b2=reg256#14,<a5=reg256#3,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a5=%ymm2,>r=%ymm9 vpand % ymm13, % ymm2, % ymm9 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#10,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm9,<r7=%ymm11,<r7=%ymm11 vpxor % ymm9, % ymm11, % ymm11 # qhasm: r = b2 & a4 # asm 1: vpand <b2=reg256#14,<a4=reg256#5,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a4=%ymm4,>r=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#10,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm9,<r6=%ymm0,<r6=%ymm0 vpxor % ymm9, % ymm0, % ymm0 # qhasm: r = b2 & a3 # asm 1: vpand <b2=reg256#14,<a3=reg256#7,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a3=%ymm6,>r=%ymm9 vpand % ymm13, % ymm6, % ymm9 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#10,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm9,<r5=%ymm3,<r5=%ymm3 vpxor % ymm9, % ymm3, % ymm3 # qhasm: r = b2 & a2 # asm 1: vpand <b2=reg256#14,<a2=reg256#9,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a2=%ymm8,>r=%ymm9 vpand % ymm13, % ymm8, % ymm9 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#10,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm9,<r4=%ymm5,<r4=%ymm5 vpxor % ymm9, % ymm5, % ymm5 # qhasm: r = b2 & a1 # asm 1: vpand <b2=reg256#14,<a1=reg256#11,>r=reg256#10 # asm 2: vpand <b2=%ymm13,<a1=%ymm10,>r=%ymm9 vpand % ymm13, % ymm10, % ymm9 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#10,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm9,<r3=%ymm7,<r3=%ymm7 vpxor % ymm9, % ymm7, % ymm7 # qhasm: r2 = b2 & a0 # asm 1: vpand <b2=reg256#14,<a0=reg256#13,>r2=reg256#10 # asm 2: vpand <b2=%ymm13,<a0=%ymm12,>r2=%ymm9 vpand % ymm13, % ymm12, % ymm9 # qhasm: b1 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b1=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b1=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b1 & a6 # asm 1: vpand 
<b1=reg256#14,<a6=reg256#2,>r=reg256#15 # asm 2: vpand <b1=%ymm13,<a6=%ymm1,>r=%ymm14 vpand % ymm13, % ymm1, % ymm14 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#15,<r7=reg256#12,<r7=reg256#12 # asm 2: vpxor <r=%ymm14,<r7=%ymm11,<r7=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: mem256[ ptr + 224 ] = r7 # asm 1: vmovupd <r7=reg256#12,224(<ptr=int64#5) # asm 2: vmovupd <r7=%ymm11,224(<ptr=%r8) vmovupd % ymm11, 224( % r8) # qhasm: r = b1 & a5 # asm 1: vpand <b1=reg256#14,<a5=reg256#3,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a5=%ymm2,>r=%ymm11 vpand % ymm13, % ymm2, % ymm11 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#12,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm11,<r6=%ymm0,<r6=%ymm0 vpxor % ymm11, % ymm0, % ymm0 # qhasm: r = b1 & a4 # asm 1: vpand <b1=reg256#14,<a4=reg256#5,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a4=%ymm4,>r=%ymm11 vpand % ymm13, % ymm4, % ymm11 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#12,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm11,<r5=%ymm3,<r5=%ymm3 vpxor % ymm11, % ymm3, % ymm3 # qhasm: r = b1 & a3 # asm 1: vpand <b1=reg256#14,<a3=reg256#7,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a3=%ymm6,>r=%ymm11 vpand % ymm13, % ymm6, % ymm11 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#12,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm11,<r4=%ymm5,<r4=%ymm5 vpxor % ymm11, % ymm5, % ymm5 # qhasm: r = b1 & a2 # asm 1: vpand <b1=reg256#14,<a2=reg256#9,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a2=%ymm8,>r=%ymm11 vpand % ymm13, % ymm8, % ymm11 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#12,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm11,<r3=%ymm7,<r3=%ymm7 vpxor % ymm11, % ymm7, % ymm7 # qhasm: r = b1 & a1 # asm 1: vpand <b1=reg256#14,<a1=reg256#11,>r=reg256#12 # asm 2: vpand <b1=%ymm13,<a1=%ymm10,>r=%ymm11 vpand % ymm13, % ymm10, % ymm11 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#12,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm11,<r2=%ymm9,<r2=%ymm9 vpxor % ymm11, % ymm9, % ymm9 # qhasm: r1 = b1 & a0 # asm 1: vpand <b1=reg256#14,<a0=reg256#13,>r1=reg256#12 # asm 2: vpand <b1=%ymm13,<a0=%ymm12,>r1=%ymm11 vpand % ymm13, % ymm12, % ymm11 # qhasm: b0 = mem128[ input_2 + 0 ] x2 # asm 1: vbroadcasti128 0(<input_2=int64#3), >b0=reg256#14 # asm 2: vbroadcasti128 0(<input_2=%rdx), >b0=%ymm13 vbroadcasti128 0( % rdx), % ymm13 # qhasm: input_2 -= input_3 # asm 1: sub <input_3=int64#4,<input_2=int64#3 # asm 2: sub <input_3=%rcx,<input_2=%rdx sub % rcx, % rdx # qhasm: r = b0 & a6 # asm 1: vpand <b0=reg256#14,<a6=reg256#2,>r=reg256#2 # asm 2: vpand <b0=%ymm13,<a6=%ymm1,>r=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#2,<r6=reg256#1,<r6=reg256#1 # asm 2: vpxor <r=%ymm1,<r6=%ymm0,<r6=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ ptr + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<ptr=int64#5) # asm 2: vmovupd <r6=%ymm0,192(<ptr=%r8) vmovupd % ymm0, 192( % r8) # qhasm: r = b0 & a5 # asm 1: vpand <b0=reg256#14,<a5=reg256#3,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a5=%ymm2,>r=%ymm0 vpand % ymm13, % ymm2, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#4,<r5=reg256#4 # asm 2: vpxor <r=%ymm0,<r5=%ymm3,<r5=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = b0 & a4 # asm 1: vpand <b0=reg256#14,<a4=reg256#5,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a4=%ymm4,>r=%ymm0 vpand % ymm13, % ymm4, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#6,<r4=reg256#6 # asm 2: vpxor <r=%ymm0,<r4=%ymm5,<r4=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = b0 & a3 # asm 1: vpand <b0=reg256#14,<a3=reg256#7,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a3=%ymm6,>r=%ymm0 vpand 
% ymm13, % ymm6, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#8,<r3=reg256#8 # asm 2: vpxor <r=%ymm0,<r3=%ymm7,<r3=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = b0 & a2 # asm 1: vpand <b0=reg256#14,<a2=reg256#9,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a2=%ymm8,>r=%ymm0 vpand % ymm13, % ymm8, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#10,<r2=reg256#10 # asm 2: vpxor <r=%ymm0,<r2=%ymm9,<r2=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = b0 & a1 # asm 1: vpand <b0=reg256#14,<a1=reg256#11,>r=reg256#1 # asm 2: vpand <b0=%ymm13,<a1=%ymm10,>r=%ymm0 vpand % ymm13, % ymm10, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#12,<r1=reg256#12 # asm 2: vpxor <r=%ymm0,<r1=%ymm11,<r1=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r0 = b0 & a0 # asm 1: vpand <b0=reg256#14,<a0=reg256#13,>r0=reg256#1 # asm 2: vpand <b0=%ymm13,<a0=%ymm12,>r0=%ymm0 vpand % ymm13, % ymm12, % ymm0 # qhasm: mem256[ ptr + 160 ] = r5 # asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#5) # asm 2: vmovupd <r5=%ymm3,160(<ptr=%r8) vmovupd % ymm3, 160( % r8) # qhasm: mem256[ ptr + 128 ] = r4 # asm 1: vmovupd <r4=reg256#6,128(<ptr=int64#5) # asm 2: vmovupd <r4=%ymm5,128(<ptr=%r8) vmovupd % ymm5, 128( % r8) # qhasm: mem256[ ptr + 96 ] = r3 # asm 1: vmovupd <r3=reg256#8,96(<ptr=int64#5) # asm 2: vmovupd <r3=%ymm7,96(<ptr=%r8) vmovupd % ymm7, 96( % r8) # qhasm: mem256[ ptr + 64 ] = r2 # asm 1: vmovupd <r2=reg256#10,64(<ptr=int64#5) # asm 2: vmovupd <r2=%ymm9,64(<ptr=%r8) vmovupd % ymm9, 64( % r8) # qhasm: mem256[ ptr + 32 ] = r1 # asm 1: vmovupd <r1=reg256#12,32(<ptr=int64#5) # asm 2: vmovupd <r1=%ymm11,32(<ptr=%r8) vmovupd % ymm11, 32( % r8) # qhasm: mem256[ ptr + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#5) # asm 2: vmovupd <r0=%ymm0,0(<ptr=%r8) vmovupd % ymm0, 0( % r8) # qhasm: vzeroupper vzeroupper # qhasm: h24 = mem128[ ptr + 560 ] # asm 1: movdqu 560(<ptr=int64#5),>h24=reg128#1 # asm 2: movdqu 560(<ptr=%r8),>h24=%xmm0 movdqu 560( % r8), % xmm0 # qhasm: h11 = h24 # asm 1: movdqa <h24=reg128#1,>h11=reg128#2 # asm 2: movdqa <h24=%xmm0,>h11=%xmm1 movdqa % xmm0, % xmm1 # qhasm: h12 = h24 # asm 1: movdqa <h24=reg128#1,>h12=reg128#3 # asm 2: movdqa <h24=%xmm0,>h12=%xmm2 movdqa % xmm0, % xmm2 # qhasm: h14 = h24 # asm 1: movdqa <h24=reg128#1,>h14=reg128#4 # asm 2: movdqa <h24=%xmm0,>h14=%xmm3 movdqa % xmm0, % xmm3 # qhasm: h15 = h24 # asm 1: movdqa <h24=reg128#1,>h15=reg128#1 # asm 2: movdqa <h24=%xmm0,>h15=%xmm0 movdqa % xmm0, % xmm0 # qhasm: h23 = mem128[ ptr + 528 ] # asm 1: movdqu 528(<ptr=int64#5),>h23=reg128#5 # asm 2: movdqu 528(<ptr=%r8),>h23=%xmm4 movdqu 528( % r8), % xmm4 # qhasm: h10 = h23 # asm 1: movdqa <h23=reg128#5,>h10=reg128#6 # asm 2: movdqa <h23=%xmm4,>h10=%xmm5 movdqa % xmm4, % xmm5 # qhasm: h11 = h11 ^ h23 # asm 1: vpxor <h23=reg128#5,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h23=%xmm4,<h11=%xmm1,>h11=%xmm1 vpxor % xmm4, % xmm1, % xmm1 # qhasm: h13 = h23 # asm 1: movdqa <h23=reg128#5,>h13=reg128#7 # asm 2: movdqa <h23=%xmm4,>h13=%xmm6 movdqa % xmm4, % xmm6 # qhasm: h14 = h14 ^ h23 # asm 1: vpxor <h23=reg128#5,<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor <h23=%xmm4,<h14=%xmm3,>h14=%xmm3 vpxor % xmm4, % xmm3, % xmm3 # qhasm: h22 = mem128[ ptr + 496 ] # asm 1: movdqu 496(<ptr=int64#5),>h22=reg128#5 # asm 2: movdqu 496(<ptr=%r8),>h22=%xmm4 movdqu 496( % r8), % xmm4 # qhasm: h9 = h22 # asm 1: movdqa <h22=reg128#5,>h9=reg128#8 # asm 2: movdqa <h22=%xmm4,>h9=%xmm7 movdqa % xmm4, % xmm7 # qhasm: h10 = h10 ^ h22 # asm 1: vpxor <h22=reg128#5,<h10=reg128#6,>h10=reg128#6 # asm 2: 
vpxor <h22=%xmm4,<h10=%xmm5,>h10=%xmm5 vpxor % xmm4, % xmm5, % xmm5 # qhasm: h12 = h12 ^ h22 # asm 1: vpxor <h22=reg128#5,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h22=%xmm4,<h12=%xmm2,>h12=%xmm2 vpxor % xmm4, % xmm2, % xmm2 # qhasm: h13 = h13 ^ h22 # asm 1: vpxor <h22=reg128#5,<h13=reg128#7,>h13=reg128#5 # asm 2: vpxor <h22=%xmm4,<h13=%xmm6,>h13=%xmm4 vpxor % xmm4, % xmm6, % xmm4 # qhasm: h21 = mem128[ ptr + 464 ] # asm 1: movdqu 464(<ptr=int64#5),>h21=reg128#7 # asm 2: movdqu 464(<ptr=%r8),>h21=%xmm6 movdqu 464( % r8), % xmm6 # qhasm: h8 = h21 # asm 1: movdqa <h21=reg128#7,>h8=reg128#9 # asm 2: movdqa <h21=%xmm6,>h8=%xmm8 movdqa % xmm6, % xmm8 # qhasm: h9 = h9 ^ h21 # asm 1: vpxor <h21=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h21=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h11 = h11 ^ h21 # asm 1: vpxor <h21=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h21=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h12 = h12 ^ h21 # asm 1: vpxor <h21=reg128#7,<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor <h21=%xmm6,<h12=%xmm2,>h12=%xmm2 vpxor % xmm6, % xmm2, % xmm2 # qhasm: h20 = mem128[ ptr + 432 ] # asm 1: movdqu 432(<ptr=int64#5),>h20=reg128#7 # asm 2: movdqu 432(<ptr=%r8),>h20=%xmm6 movdqu 432( % r8), % xmm6 # qhasm: h7 = h20 # asm 1: movdqa <h20=reg128#7,>h7=reg128#10 # asm 2: movdqa <h20=%xmm6,>h7=%xmm9 movdqa % xmm6, % xmm9 # qhasm: h8 = h8 ^ h20 # asm 1: vpxor <h20=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h20=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h10 = h10 ^ h20 # asm 1: vpxor <h20=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h20=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h11 = h11 ^ h20 # asm 1: vpxor <h20=reg128#7,<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor <h20=%xmm6,<h11=%xmm1,>h11=%xmm1 vpxor % xmm6, % xmm1, % xmm1 # qhasm: h19 = mem128[ ptr + 400 ] # asm 1: movdqu 400(<ptr=int64#5),>h19=reg128#7 # asm 2: movdqu 400(<ptr=%r8),>h19=%xmm6 movdqu 400( % r8), % xmm6 # qhasm: h6 = h19 # asm 1: movdqa <h19=reg128#7,>h6=reg128#11 # asm 2: movdqa <h19=%xmm6,>h6=%xmm10 movdqa % xmm6, % xmm10 # qhasm: h7 = h7 ^ h19 # asm 1: vpxor <h19=reg128#7,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h19=%xmm6,<h7=%xmm9,>h7=%xmm9 vpxor % xmm6, % xmm9, % xmm9 # qhasm: h9 = h9 ^ h19 # asm 1: vpxor <h19=reg128#7,<h9=reg128#8,>h9=reg128#8 # asm 2: vpxor <h19=%xmm6,<h9=%xmm7,>h9=%xmm7 vpxor % xmm6, % xmm7, % xmm7 # qhasm: h10 = h10 ^ h19 # asm 1: vpxor <h19=reg128#7,<h10=reg128#6,>h10=reg128#6 # asm 2: vpxor <h19=%xmm6,<h10=%xmm5,>h10=%xmm5 vpxor % xmm6, % xmm5, % xmm5 # qhasm: h18 = mem128[ ptr + 368 ] # asm 1: movdqu 368(<ptr=int64#5),>h18=reg128#7 # asm 2: movdqu 368(<ptr=%r8),>h18=%xmm6 movdqu 368( % r8), % xmm6 # qhasm: h18 = h18 ^ mem128[ ptr + 576 ] # asm 1: vpxor 576(<ptr=int64#5),<h18=reg128#7,>h18=reg128#7 # asm 2: vpxor 576(<ptr=%r8),<h18=%xmm6,>h18=%xmm6 vpxor 576( % r8), % xmm6, % xmm6 # qhasm: h5 = h18 # asm 1: movdqa <h18=reg128#7,>h5=reg128#12 # asm 2: movdqa <h18=%xmm6,>h5=%xmm11 movdqa % xmm6, % xmm11 # qhasm: h6 = h6 ^ h18 # asm 1: vpxor <h18=reg128#7,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h18=%xmm6,<h6=%xmm10,>h6=%xmm10 vpxor % xmm6, % xmm10, % xmm10 # qhasm: h8 = h8 ^ h18 # asm 1: vpxor <h18=reg128#7,<h8=reg128#9,>h8=reg128#9 # asm 2: vpxor <h18=%xmm6,<h8=%xmm8,>h8=%xmm8 vpxor % xmm6, % xmm8, % xmm8 # qhasm: h9 = h9 ^ h18 # asm 1: vpxor <h18=reg128#7,<h9=reg128#8,>h9=reg128#7 # asm 2: vpxor <h18=%xmm6,<h9=%xmm7,>h9=%xmm6 vpxor % xmm6, % xmm7, % xmm6 # qhasm: h17 = 
mem128[ ptr + 336 ] # asm 1: movdqu 336(<ptr=int64#5),>h17=reg128#8 # asm 2: movdqu 336(<ptr=%r8),>h17=%xmm7 movdqu 336( % r8), % xmm7 # qhasm: h17 = h17 ^ mem128[ ptr + 544 ] # asm 1: vpxor 544(<ptr=int64#5),<h17=reg128#8,>h17=reg128#8 # asm 2: vpxor 544(<ptr=%r8),<h17=%xmm7,>h17=%xmm7 vpxor 544( % r8), % xmm7, % xmm7 # qhasm: h4 = h17 # asm 1: movdqa <h17=reg128#8,>h4=reg128#13 # asm 2: movdqa <h17=%xmm7,>h4=%xmm12 movdqa % xmm7, % xmm12 # qhasm: h5 = h5 ^ h17 # asm 1: vpxor <h17=reg128#8,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h17=%xmm7,<h5=%xmm11,>h5=%xmm11 vpxor % xmm7, % xmm11, % xmm11 # qhasm: h7 = h7 ^ h17 # asm 1: vpxor <h17=reg128#8,<h7=reg128#10,>h7=reg128#10 # asm 2: vpxor <h17=%xmm7,<h7=%xmm9,>h7=%xmm9 vpxor % xmm7, % xmm9, % xmm9 # qhasm: h8 = h8 ^ h17 # asm 1: vpxor <h17=reg128#8,<h8=reg128#9,>h8=reg128#8 # asm 2: vpxor <h17=%xmm7,<h8=%xmm8,>h8=%xmm7 vpxor % xmm7, % xmm8, % xmm7 # qhasm: h16 = mem128[ ptr + 304 ] # asm 1: movdqu 304(<ptr=int64#5),>h16=reg128#9 # asm 2: movdqu 304(<ptr=%r8),>h16=%xmm8 movdqu 304( % r8), % xmm8 # qhasm: h16 = h16 ^ mem128[ ptr + 512 ] # asm 1: vpxor 512(<ptr=int64#5),<h16=reg128#9,>h16=reg128#9 # asm 2: vpxor 512(<ptr=%r8),<h16=%xmm8,>h16=%xmm8 vpxor 512( % r8), % xmm8, % xmm8 # qhasm: h3 = h16 # asm 1: movdqa <h16=reg128#9,>h3=reg128#14 # asm 2: movdqa <h16=%xmm8,>h3=%xmm13 movdqa % xmm8, % xmm13 # qhasm: h4 = h4 ^ h16 # asm 1: vpxor <h16=reg128#9,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor <h16=%xmm8,<h4=%xmm12,>h4=%xmm12 vpxor % xmm8, % xmm12, % xmm12 # qhasm: h6 = h6 ^ h16 # asm 1: vpxor <h16=reg128#9,<h6=reg128#11,>h6=reg128#11 # asm 2: vpxor <h16=%xmm8,<h6=%xmm10,>h6=%xmm10 vpxor % xmm8, % xmm10, % xmm10 # qhasm: h7 = h7 ^ h16 # asm 1: vpxor <h16=reg128#9,<h7=reg128#10,>h7=reg128#9 # asm 2: vpxor <h16=%xmm8,<h7=%xmm9,>h7=%xmm8 vpxor % xmm8, % xmm9, % xmm8 # qhasm: h15 = h15 ^ mem128[ ptr + 272 ] # asm 1: vpxor 272(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 272(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 272( % r8), % xmm0, % xmm0 # qhasm: h15 = h15 ^ mem128[ ptr + 480 ] # asm 1: vpxor 480(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1 # asm 2: vpxor 480(<ptr=%r8),<h15=%xmm0,>h15=%xmm0 vpxor 480( % r8), % xmm0, % xmm0 # qhasm: h2 = h15 # asm 1: movdqa <h15=reg128#1,>h2=reg128#10 # asm 2: movdqa <h15=%xmm0,>h2=%xmm9 movdqa % xmm0, % xmm9 # qhasm: h3 = h3 ^ h15 # asm 1: vpxor <h15=reg128#1,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h15=%xmm0,<h3=%xmm13,>h3=%xmm13 vpxor % xmm0, % xmm13, % xmm13 # qhasm: h5 = h5 ^ h15 # asm 1: vpxor <h15=reg128#1,<h5=reg128#12,>h5=reg128#12 # asm 2: vpxor <h15=%xmm0,<h5=%xmm11,>h5=%xmm11 vpxor % xmm0, % xmm11, % xmm11 # qhasm: h6 = h6 ^ h15 # asm 1: vpxor <h15=reg128#1,<h6=reg128#11,>h6=reg128#1 # asm 2: vpxor <h15=%xmm0,<h6=%xmm10,>h6=%xmm0 vpxor % xmm0, % xmm10, % xmm0 # qhasm: h14 = h14 ^ mem128[ ptr + 240 ] # asm 1: vpxor 240(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 240(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 240( % r8), % xmm3, % xmm3 # qhasm: h14 = h14 ^ mem128[ ptr + 448 ] # asm 1: vpxor 448(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4 # asm 2: vpxor 448(<ptr=%r8),<h14=%xmm3,>h14=%xmm3 vpxor 448( % r8), % xmm3, % xmm3 # qhasm: h1 = h14 # asm 1: movdqa <h14=reg128#4,>h1=reg128#11 # asm 2: movdqa <h14=%xmm3,>h1=%xmm10 movdqa % xmm3, % xmm10 # qhasm: h2 = h2 ^ h14 # asm 1: vpxor <h14=reg128#4,<h2=reg128#10,>h2=reg128#10 # asm 2: vpxor <h14=%xmm3,<h2=%xmm9,>h2=%xmm9 vpxor % xmm3, % xmm9, % xmm9 # qhasm: h4 = h4 ^ h14 # asm 1: vpxor <h14=reg128#4,<h4=reg128#13,>h4=reg128#13 # asm 2: vpxor 
<h14=%xmm3,<h4=%xmm12,>h4=%xmm12 vpxor % xmm3, % xmm12, % xmm12 # qhasm: h5 = h5 ^ h14 # asm 1: vpxor <h14=reg128#4,<h5=reg128#12,>h5=reg128#4 # asm 2: vpxor <h14=%xmm3,<h5=%xmm11,>h5=%xmm3 vpxor % xmm3, % xmm11, % xmm3 # qhasm: h13 = h13 ^ mem128[ ptr + 208 ] # asm 1: vpxor 208(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 208(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 208( % r8), % xmm4, % xmm4 # qhasm: h13 = h13 ^ mem128[ ptr + 416 ] # asm 1: vpxor 416(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5 # asm 2: vpxor 416(<ptr=%r8),<h13=%xmm4,>h13=%xmm4 vpxor 416( % r8), % xmm4, % xmm4 # qhasm: h0 = h13 # asm 1: movdqa <h13=reg128#5,>h0=reg128#12 # asm 2: movdqa <h13=%xmm4,>h0=%xmm11 movdqa % xmm4, % xmm11 # qhasm: h1 = h1 ^ h13 # asm 1: vpxor <h13=reg128#5,<h1=reg128#11,>h1=reg128#11 # asm 2: vpxor <h13=%xmm4,<h1=%xmm10,>h1=%xmm10 vpxor % xmm4, % xmm10, % xmm10 # qhasm: h3 = h3 ^ h13 # asm 1: vpxor <h13=reg128#5,<h3=reg128#14,>h3=reg128#14 # asm 2: vpxor <h13=%xmm4,<h3=%xmm13,>h3=%xmm13 vpxor % xmm4, % xmm13, % xmm13 # qhasm: h4 = h4 ^ h13 # asm 1: vpxor <h13=reg128#5,<h4=reg128#13,>h4=reg128#5 # asm 2: vpxor <h13=%xmm4,<h4=%xmm12,>h4=%xmm4 vpxor % xmm4, % xmm12, % xmm4 # qhasm: h12 = h12 ^ mem128[ ptr + 384 ] # asm 1: vpxor 384(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 384(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 384( % r8), % xmm2, % xmm2 # qhasm: h12 = h12 ^ mem128[ ptr + 176 ] # asm 1: vpxor 176(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3 # asm 2: vpxor 176(<ptr=%r8),<h12=%xmm2,>h12=%xmm2 vpxor 176( % r8), % xmm2, % xmm2 # qhasm: mem128[ input_0 + 192 ] = h12 # asm 1: movdqu <h12=reg128#3,192(<input_0=int64#1) # asm 2: movdqu <h12=%xmm2,192(<input_0=%rdi) movdqu % xmm2, 192( % rdi) # qhasm: h11 = h11 ^ mem128[ ptr + 352 ] # asm 1: vpxor 352(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 352(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 352( % r8), % xmm1, % xmm1 # qhasm: h11 = h11 ^ mem128[ ptr + 144 ] # asm 1: vpxor 144(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2 # asm 2: vpxor 144(<ptr=%r8),<h11=%xmm1,>h11=%xmm1 vpxor 144( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 176 ] = h11 # asm 1: movdqu <h11=reg128#2,176(<input_0=int64#1) # asm 2: movdqu <h11=%xmm1,176(<input_0=%rdi) movdqu % xmm1, 176( % rdi) # qhasm: h10 = h10 ^ mem128[ ptr + 320 ] # asm 1: vpxor 320(<ptr=int64#5),<h10=reg128#6,>h10=reg128#2 # asm 2: vpxor 320(<ptr=%r8),<h10=%xmm5,>h10=%xmm1 vpxor 320( % r8), % xmm5, % xmm1 # qhasm: h10 = h10 ^ mem128[ ptr + 112 ] # asm 1: vpxor 112(<ptr=int64#5),<h10=reg128#2,>h10=reg128#2 # asm 2: vpxor 112(<ptr=%r8),<h10=%xmm1,>h10=%xmm1 vpxor 112( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 160 ] = h10 # asm 1: movdqu <h10=reg128#2,160(<input_0=int64#1) # asm 2: movdqu <h10=%xmm1,160(<input_0=%rdi) movdqu % xmm1, 160( % rdi) # qhasm: h9 = h9 ^ mem128[ ptr + 288 ] # asm 1: vpxor 288(<ptr=int64#5),<h9=reg128#7,>h9=reg128#2 # asm 2: vpxor 288(<ptr=%r8),<h9=%xmm6,>h9=%xmm1 vpxor 288( % r8), % xmm6, % xmm1 # qhasm: h9 = h9 ^ mem128[ ptr + 80 ] # asm 1: vpxor 80(<ptr=int64#5),<h9=reg128#2,>h9=reg128#2 # asm 2: vpxor 80(<ptr=%r8),<h9=%xmm1,>h9=%xmm1 vpxor 80( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 144 ] = h9 # asm 1: movdqu <h9=reg128#2,144(<input_0=int64#1) # asm 2: movdqu <h9=%xmm1,144(<input_0=%rdi) movdqu % xmm1, 144( % rdi) # qhasm: h8 = h8 ^ mem128[ ptr + 256 ] # asm 1: vpxor 256(<ptr=int64#5),<h8=reg128#8,>h8=reg128#2 # asm 2: vpxor 256(<ptr=%r8),<h8=%xmm7,>h8=%xmm1 vpxor 256( % r8), % xmm7, % xmm1 # qhasm: h8 = h8 ^ mem128[ ptr + 48 ] # asm 1: vpxor 
48(<ptr=int64#5),<h8=reg128#2,>h8=reg128#2 # asm 2: vpxor 48(<ptr=%r8),<h8=%xmm1,>h8=%xmm1 vpxor 48( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 128 ] = h8 # asm 1: movdqu <h8=reg128#2,128(<input_0=int64#1) # asm 2: movdqu <h8=%xmm1,128(<input_0=%rdi) movdqu % xmm1, 128( % rdi) # qhasm: h7 = h7 ^ mem128[ ptr + 224 ] # asm 1: vpxor 224(<ptr=int64#5),<h7=reg128#9,>h7=reg128#2 # asm 2: vpxor 224(<ptr=%r8),<h7=%xmm8,>h7=%xmm1 vpxor 224( % r8), % xmm8, % xmm1 # qhasm: h7 = h7 ^ mem128[ ptr + 16 ] # asm 1: vpxor 16(<ptr=int64#5),<h7=reg128#2,>h7=reg128#2 # asm 2: vpxor 16(<ptr=%r8),<h7=%xmm1,>h7=%xmm1 vpxor 16( % r8), % xmm1, % xmm1 # qhasm: mem128[ input_0 + 112 ] = h7 # asm 1: movdqu <h7=reg128#2,112(<input_0=int64#1) # asm 2: movdqu <h7=%xmm1,112(<input_0=%rdi) movdqu % xmm1, 112( % rdi) # qhasm: h6 = h6 ^ mem128[ ptr + 192 ] # asm 1: vpxor 192(<ptr=int64#5),<h6=reg128#1,>h6=reg128#1 # asm 2: vpxor 192(<ptr=%r8),<h6=%xmm0,>h6=%xmm0 vpxor 192( % r8), % xmm0, % xmm0 # qhasm: mem128[ input_0 + 96 ] = h6 # asm 1: movdqu <h6=reg128#1,96(<input_0=int64#1) # asm 2: movdqu <h6=%xmm0,96(<input_0=%rdi) movdqu % xmm0, 96( % rdi) # qhasm: h5 = h5 ^ mem128[ ptr + 160 ] # asm 1: vpxor 160(<ptr=int64#5),<h5=reg128#4,>h5=reg128#1 # asm 2: vpxor 160(<ptr=%r8),<h5=%xmm3,>h5=%xmm0 vpxor 160( % r8), % xmm3, % xmm0 # qhasm: mem128[ input_0 + 80 ] = h5 # asm 1: movdqu <h5=reg128#1,80(<input_0=int64#1) # asm 2: movdqu <h5=%xmm0,80(<input_0=%rdi) movdqu % xmm0, 80( % rdi) # qhasm: h4 = h4 ^ mem128[ ptr + 128 ] # asm 1: vpxor 128(<ptr=int64#5),<h4=reg128#5,>h4=reg128#1 # asm 2: vpxor 128(<ptr=%r8),<h4=%xmm4,>h4=%xmm0 vpxor 128( % r8), % xmm4, % xmm0 # qhasm: mem128[ input_0 + 64 ] = h4 # asm 1: movdqu <h4=reg128#1,64(<input_0=int64#1) # asm 2: movdqu <h4=%xmm0,64(<input_0=%rdi) movdqu % xmm0, 64( % rdi) # qhasm: h3 = h3 ^ mem128[ ptr + 96 ] # asm 1: vpxor 96(<ptr=int64#5),<h3=reg128#14,>h3=reg128#1 # asm 2: vpxor 96(<ptr=%r8),<h3=%xmm13,>h3=%xmm0 vpxor 96( % r8), % xmm13, % xmm0 # qhasm: mem128[ input_0 + 48 ] = h3 # asm 1: movdqu <h3=reg128#1,48(<input_0=int64#1) # asm 2: movdqu <h3=%xmm0,48(<input_0=%rdi) movdqu % xmm0, 48( % rdi) # qhasm: h2 = h2 ^ mem128[ ptr + 64 ] # asm 1: vpxor 64(<ptr=int64#5),<h2=reg128#10,>h2=reg128#1 # asm 2: vpxor 64(<ptr=%r8),<h2=%xmm9,>h2=%xmm0 vpxor 64( % r8), % xmm9, % xmm0 # qhasm: mem128[ input_0 + 32 ] = h2 # asm 1: movdqu <h2=reg128#1,32(<input_0=int64#1) # asm 2: movdqu <h2=%xmm0,32(<input_0=%rdi) movdqu % xmm0, 32( % rdi) # qhasm: h1 = h1 ^ mem128[ ptr + 32 ] # asm 1: vpxor 32(<ptr=int64#5),<h1=reg128#11,>h1=reg128#1 # asm 2: vpxor 32(<ptr=%r8),<h1=%xmm10,>h1=%xmm0 vpxor 32( % r8), % xmm10, % xmm0 # qhasm: mem128[ input_0 + 16 ] = h1 # asm 1: movdqu <h1=reg128#1,16(<input_0=int64#1) # asm 2: movdqu <h1=%xmm0,16(<input_0=%rdi) movdqu % xmm0, 16( % rdi) # qhasm: h0 = h0 ^ mem128[ ptr + 0 ] # asm 1: vpxor 0(<ptr=int64#5),<h0=reg128#12,>h0=reg128#1 # asm 2: vpxor 0(<ptr=%r8),<h0=%xmm11,>h0=%xmm0 vpxor 0( % r8), % xmm11, % xmm0 # qhasm: mem128[ input_0 + 0 ] = h0 # asm 1: movdqu <h0=reg128#1,0(<input_0=int64#1) # asm 2: movdqu <h0=%xmm0,0(<input_0=%rdi) movdqu % xmm0, 0( % rdi) # qhasm: return add % r11, % rsp ret
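The AND/XOR ladder that ends above is the bitsliced schoolbook multiplication over GF(2): each vpand forms one partial product f[i] & g[j], each vpxor folds it into product coefficient r[i+j], and the h0..h24 block at the end reduces coefficients 13..24 back down (h24 feeds h15, h14, h12, h11, i.e. taps {4,3,1,0}). A minimal C sketch of the same pattern, assuming GFBITS = 13 and the reduction polynomial x^13 + x^4 + x^3 + x + 1 suggested by those taps; one uint64_t limb here stands in for a 256-bit ymm lane, and vec_mul_ref is an illustrative name, not the routine's actual entry point:

#include <stdint.h>

#define GFBITS 13
typedef uint64_t vec;  /* one bit-plane of the bitsliced field element */

static void vec_mul_ref(vec h[GFBITS], const vec f[GFBITS], const vec g[GFBITS])
{
    vec buf[2 * GFBITS - 1] = {0};

    /* schoolbook carry-less multiply: buf[i+j] ^= f[i] & g[j] */
    for (int i = 0; i < GFBITS; i++)
        for (int j = 0; j < GFBITS; j++)
            buf[i + j] ^= f[i] & g[j];

    /* fold coefficients 24..13 down modulo x^13 + x^4 + x^3 + x + 1;
       descending order lets a folded-in high coefficient be folded again */
    for (int i = 2 * GFBITS - 2; i >= GFBITS; i--) {
        buf[i - GFBITS + 4] ^= buf[i];
        buf[i - GFBITS + 3] ^= buf[i];
        buf[i - GFBITS + 1] ^= buf[i];
        buf[i - GFBITS + 0] ^= buf[i];
    }

    for (int i = 0; i < GFBITS; i++)
        h[i] = buf[i];
}

The assembly performs the identical dataflow, but interleaved across ymm registers and with the product buffer spilled to the stack (the vmovupd stores at ptr+0..352) before the 128-bit-granularity reduction pass.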
mktmansour/MKT-KSA-Geolocation-Security
11,545
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128f/avx2/vec_reduce_asm.S
#include "namespace.h" #define vec_reduce_asm CRYPTO_NAMESPACE(vec_reduce_asm) #define _vec_reduce_asm _CRYPTO_NAMESPACE(vec_reduce_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 t0 # qhasm: int64 t1 # qhasm: int64 c # qhasm: int64 r # qhasm: enter vec_reduce_asm .p2align 5 .global _vec_reduce_asm .global vec_reduce_asm _vec_reduce_asm: vec_reduce_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: r = 0 # asm 1: mov $0,>r=int64#7 # asm 2: mov $0,>r=%rax mov $0, % rax # qhasm: t0 = mem64[ input_0 + 192 ] # asm 1: movq 192(<input_0=int64#1),>t0=int64#2 # asm 2: movq 192(<input_0=%rdi),>t0=%rsi movq 192( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 200 ] # asm 1: movq 200(<input_0=int64#1),>t1=int64#3 # asm 2: movq 200(<input_0=%rdi),>t1=%rdx movq 200( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 176 ] # asm 1: movq 176(<input_0=int64#1),>t0=int64#2 # asm 2: movq 176(<input_0=%rdi),>t0=%rsi movq 176( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 184 ] # asm 1: movq 184(<input_0=int64#1),>t1=int64#3 # asm 2: movq 184(<input_0=%rdi),>t1=%rdx movq 184( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 160 ] # asm 1: movq 160(<input_0=int64#1),>t0=int64#2 # asm 2: movq 160(<input_0=%rdi),>t0=%rsi movq 160( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 168 ] # asm 1: movq 168(<input_0=int64#1),>t1=int64#3 # asm 2: movq 168(<input_0=%rdi),>t1=%rdx movq 168( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 144 ] # asm 1: movq 144(<input_0=int64#1),>t0=int64#2 # asm 2: movq 144(<input_0=%rdi),>t0=%rsi movq 144( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 152 ] # asm 1: movq 152(<input_0=int64#1),>t1=int64#3 # asm 2: movq 152(<input_0=%rdi),>t1=%rdx movq 152( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor 
<t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 128 ] # asm 1: movq 128(<input_0=int64#1),>t0=int64#2 # asm 2: movq 128(<input_0=%rdi),>t0=%rsi movq 128( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 136 ] # asm 1: movq 136(<input_0=int64#1),>t1=int64#3 # asm 2: movq 136(<input_0=%rdi),>t1=%rdx movq 136( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 112 ] # asm 1: movq 112(<input_0=int64#1),>t0=int64#2 # asm 2: movq 112(<input_0=%rdi),>t0=%rsi movq 112( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 120 ] # asm 1: movq 120(<input_0=int64#1),>t1=int64#3 # asm 2: movq 120(<input_0=%rdi),>t1=%rdx movq 120( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 96 ] # asm 1: movq 96(<input_0=int64#1),>t0=int64#2 # asm 2: movq 96(<input_0=%rdi),>t0=%rsi movq 96( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 104 ] # asm 1: movq 104(<input_0=int64#1),>t1=int64#3 # asm 2: movq 104(<input_0=%rdi),>t1=%rdx movq 104( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 80 ] # asm 1: movq 80(<input_0=int64#1),>t0=int64#2 # asm 2: movq 80(<input_0=%rdi),>t0=%rsi movq 80( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 88 ] # asm 1: movq 88(<input_0=int64#1),>t1=int64#3 # asm 2: movq 88(<input_0=%rdi),>t1=%rdx movq 88( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or 
<c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 64 ] # asm 1: movq 64(<input_0=int64#1),>t0=int64#2 # asm 2: movq 64(<input_0=%rdi),>t0=%rsi movq 64( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 72 ] # asm 1: movq 72(<input_0=int64#1),>t1=int64#3 # asm 2: movq 72(<input_0=%rdi),>t1=%rdx movq 72( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 48 ] # asm 1: movq 48(<input_0=int64#1),>t0=int64#2 # asm 2: movq 48(<input_0=%rdi),>t0=%rsi movq 48( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 56 ] # asm 1: movq 56(<input_0=int64#1),>t1=int64#3 # asm 2: movq 56(<input_0=%rdi),>t1=%rdx movq 56( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 32 ] # asm 1: movq 32(<input_0=int64#1),>t0=int64#2 # asm 2: movq 32(<input_0=%rdi),>t0=%rsi movq 32( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 40 ] # asm 1: movq 40(<input_0=int64#1),>t1=int64#3 # asm 2: movq 40(<input_0=%rdi),>t1=%rdx movq 40( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 16 ] # asm 1: movq 16(<input_0=int64#1),>t0=int64#2 # asm 2: movq 16(<input_0=%rdi),>t0=%rsi movq 16( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 24 ] # asm 1: movq 24(<input_0=int64#1),>t1=int64#3 # asm 2: movq 24(<input_0=%rdi),>t1=%rdx movq 24( % rdi), % rdx # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#3,<t0=int64#2 # asm 2: xor <t1=%rdx,<t0=%rsi xor % rdx, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#2 # asm 2: popcnt <t0=%rsi, >c=%rsi popcnt % rsi, % rsi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#2d # asm 2: and $1,<c=%esi and $1, % esi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#2,<r=int64#7 # asm 2: or <c=%rsi,<r=%rax or % rsi, % rax # qhasm: t0 = mem64[ input_0 + 0 ] # asm 1: movq 0(<input_0=int64#1),>t0=int64#2 # asm 2: movq 0(<input_0=%rdi),>t0=%rsi movq 0( % rdi), % rsi # qhasm: t1 = mem64[ input_0 + 8 ] # asm 1: movq 8(<input_0=int64#1),>t1=int64#1 # asm 2: movq 8(<input_0=%rdi),>t1=%rdi movq 8( % rdi), % rdi # qhasm: t0 ^= t1 # asm 1: xor <t1=int64#1,<t0=int64#2 # asm 2: xor 
<t1=%rdi,<t0=%rsi xor % rdi, % rsi # qhasm: c = count(t0) # asm 1: popcnt <t0=int64#2, >c=int64#1 # asm 2: popcnt <t0=%rsi, >c=%rdi popcnt % rsi, % rdi # qhasm: (uint32) c &= 1 # asm 1: and $1,<c=int64#1d # asm 2: and $1,<c=%edi and $1, % edi # qhasm: r <<= 1 # asm 1: shl $1,<r=int64#7 # asm 2: shl $1,<r=%rax shl $1, % rax # qhasm: r |= c # asm 1: or <c=int64#1,<r=int64#7 # asm 2: or <c=%rdi,<r=%rax or % rdi, % rax # qhasm: return r add % r11, % rsp ret
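vec_reduce_asm above is a straight-line parity reduction: for each 128-bit limb it XORs the two 64-bit halves, takes the parity of the result with popcnt followed by and $1, and shifts that bit into rax, working from the limb at byte offset 192 down to offset 0. A short C equivalent, assuming the input is 13 contiguous 128-bit limbs (26 uint64 words), as the 0..200 offsets suggest; the builtin is GCC/Clang-specific:

#include <stdint.h>

static uint64_t vec_reduce_ref(const uint64_t in[26])
{
    uint64_t r = 0;

    /* limb 12 is processed first, so its parity ends up in bit 12 of r;
       bit i of the result is the parity of 128-bit limb i */
    for (int i = 12; i >= 0; i--) {
        uint64_t t = in[2 * i] ^ in[2 * i + 1];
        r = (r << 1) | ((uint64_t)__builtin_popcountll(t) & 1);
    }
    return r;
}

The fully unrolled assembly avoids the loop counter entirely and keeps the accumulator live in rax across all 13 steps.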
mktmansour/MKT-KSA-Geolocation-Security
22,917
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128f/avx2/syndrome_asm.S
#include "namespace.h" #define syndrome_asm CRYPTO_NAMESPACE(syndrome_asm) #define _syndrome_asm _CRYPTO_NAMESPACE(syndrome_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: int64 b64 # qhasm: int64 synd # qhasm: int64 addr # qhasm: int64 c # qhasm: int64 c_all # qhasm: int64 row # qhasm: int64 p # qhasm: int64 e # qhasm: int64 s # qhasm: reg256 pp # qhasm: reg256 ee # qhasm: reg256 ss # qhasm: int64 buf_ptr # qhasm: stack256 buf # qhasm: enter syndrome_asm .p2align 5 .global _syndrome_asm .global syndrome_asm _syndrome_asm: syndrome_asm: mov % rsp, % r11 and $31, % r11 add $32, % r11 sub % r11, % rsp # qhasm: input_1 += 1044364 # asm 1: add $1044364,<input_1=int64#2 # asm 2: add $1044364,<input_1=%rsi add $1044364, % rsi # qhasm: buf_ptr = &buf # asm 1: leaq <buf=stack256#1,>buf_ptr=int64#4 # asm 2: leaq <buf=0(%rsp),>buf_ptr=%rcx leaq 0( % rsp), % rcx # qhasm: row = 1664 # asm 1: mov $1664,>row=int64#5 # asm 2: mov $1664,>row=%r8 mov $1664, % r8 # qhasm: loop: ._loop: # qhasm: row -= 1 # asm 1: sub $1,<row=int64#5 # asm 2: sub $1,<row=%r8 sub $1, % r8 # qhasm: ss = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>ss=reg256#1 # asm 2: vmovupd 0(<input_1=%rsi),>ss=%ymm0 vmovupd 0( % rsi), % ymm0 # qhasm: ee = mem256[ input_2 + 208 ] # asm 1: vmovupd 208(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 208(<input_2=%rdx),>ee=%ymm1 vmovupd 208( % rdx), % ymm1 # qhasm: ss &= ee # asm 1: vpand <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpand <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpand % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 32(<input_1=%rsi),>pp=%ymm1 vmovupd 32( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 240 ] # asm 1: vmovupd 240(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 240(<input_2=%rdx),>ee=%ymm2 vmovupd 240( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 64(<input_1=%rsi),>pp=%ymm1 vmovupd 64( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 272 ] # asm 1: vmovupd 272(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 272(<input_2=%rdx),>ee=%ymm2 vmovupd 272( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 96(<input_1=%rsi),>pp=%ymm1 vmovupd 96( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 304 ] # asm 1: vmovupd 304(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 304(<input_2=%rdx),>ee=%ymm2 vmovupd 304( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % 
ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 128(<input_1=%rsi),>pp=%ymm1 vmovupd 128( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 336 ] # asm 1: vmovupd 336(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 336(<input_2=%rdx),>ee=%ymm2 vmovupd 336( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 160(<input_1=%rsi),>pp=%ymm1 vmovupd 160( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 368 ] # asm 1: vmovupd 368(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 368(<input_2=%rdx),>ee=%ymm2 vmovupd 368( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 192(<input_1=%rsi),>pp=%ymm1 vmovupd 192( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 400 ] # asm 1: vmovupd 400(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 400(<input_2=%rdx),>ee=%ymm2 vmovupd 400( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 224(<input_1=%rsi),>pp=%ymm1 vmovupd 224( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 432 ] # asm 1: vmovupd 432(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 432(<input_2=%rdx),>ee=%ymm2 vmovupd 432( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 256(<input_1=%rsi),>pp=%ymm1 vmovupd 256( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 464 ] # asm 1: vmovupd 464(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 464(<input_2=%rdx),>ee=%ymm2 vmovupd 464( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 288(<input_1=%rsi),>pp=%ymm1 vmovupd 288( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 496 ] # asm 1: vmovupd 496(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 
496(<input_2=%rdx),>ee=%ymm2 vmovupd 496( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 320(<input_1=%rsi),>pp=%ymm1 vmovupd 320( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 528 ] # asm 1: vmovupd 528(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 528(<input_2=%rdx),>ee=%ymm2 vmovupd 528( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 352(<input_1=%rsi),>pp=%ymm1 vmovupd 352( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 560 ] # asm 1: vmovupd 560(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 560(<input_2=%rdx),>ee=%ymm2 vmovupd 560( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>pp=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 592 ] # asm 1: vmovupd 592(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 592(<input_2=%rdx),>ee=%ymm2 vmovupd 592( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 416 ] # asm 1: vmovupd 416(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 416(<input_1=%rsi),>pp=%ymm1 vmovupd 416( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 624 ] # asm 1: vmovupd 624(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 624(<input_2=%rdx),>ee=%ymm2 vmovupd 624( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 448 ] # asm 1: vmovupd 448(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 448(<input_1=%rsi),>pp=%ymm1 vmovupd 448( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 656 ] # asm 1: vmovupd 656(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 656(<input_2=%rdx),>ee=%ymm2 vmovupd 656( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 480 ] # asm 1: vmovupd 480(<input_1=int64#2),>pp=reg256#2 # asm 2: 
vmovupd 480(<input_1=%rsi),>pp=%ymm1 vmovupd 480( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 688 ] # asm 1: vmovupd 688(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 688(<input_2=%rdx),>ee=%ymm2 vmovupd 688( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 512 ] # asm 1: vmovupd 512(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 512(<input_1=%rsi),>pp=%ymm1 vmovupd 512( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 720 ] # asm 1: vmovupd 720(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 720(<input_2=%rdx),>ee=%ymm2 vmovupd 720( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 544 ] # asm 1: vmovupd 544(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 544(<input_1=%rsi),>pp=%ymm1 vmovupd 544( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 752 ] # asm 1: vmovupd 752(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 752(<input_2=%rdx),>ee=%ymm2 vmovupd 752( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: pp = mem256[ input_1 + 576 ] # asm 1: vmovupd 576(<input_1=int64#2),>pp=reg256#2 # asm 2: vmovupd 576(<input_1=%rsi),>pp=%ymm1 vmovupd 576( % rsi), % ymm1 # qhasm: ee = mem256[ input_2 + 784 ] # asm 1: vmovupd 784(<input_2=int64#3),>ee=reg256#3 # asm 2: vmovupd 784(<input_2=%rdx),>ee=%ymm2 vmovupd 784( % rdx), % ymm2 # qhasm: pp &= ee # asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2 # asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1 vpand % ymm2, % ymm1, % ymm1 # qhasm: ss ^= pp # asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: buf = ss # asm 1: vmovapd <ss=reg256#1,>buf=stack256#1 # asm 2: vmovapd <ss=%ymm0,>buf=0(%rsp) vmovapd % ymm0, 0( % rsp) # qhasm: s = mem64[input_1 + 608] # asm 1: movq 608(<input_1=int64#2),>s=int64#6 # asm 2: movq 608(<input_1=%rsi),>s=%r9 movq 608( % rsi), % r9 # qhasm: e = mem64[input_2 + 816] # asm 1: movq 816(<input_2=int64#3),>e=int64#7 # asm 2: movq 816(<input_2=%rdx),>e=%rax movq 816( % rdx), % rax # qhasm: s &= e # asm 1: and <e=int64#7,<s=int64#6 # asm 2: and <e=%rax,<s=%r9 and % rax, % r9 # qhasm: p = mem64[input_1 + 616] # asm 1: movq 616(<input_1=int64#2),>p=int64#7 # asm 2: movq 616(<input_1=%rsi),>p=%rax movq 616( % rsi), % rax # qhasm: e = mem64[input_2 + 824] # asm 1: movq 824(<input_2=int64#3),>e=int64#8 # asm 2: movq 824(<input_2=%rdx),>e=%r10 movq 824( % rdx), % r10 # qhasm: p &= e # asm 1: and <e=int64#8,<p=int64#7 # asm 2: and <e=%r10,<p=%rax and % r10, % rax # qhasm: s ^= p # asm 1: xor <p=int64#7,<s=int64#6 # asm 2: xor <p=%rax,<s=%r9 xor % rax, % r9 # qhasm: p = *(uint32 *)(input_1 + 624) # asm 1: movl 624(<input_1=int64#2),>p=int64#7d # asm 2: movl 624(<input_1=%rsi),>p=%eax movl 624( % rsi), % eax # qhasm: e = *(uint32 
*)(input_2 + 832) # asm 1: movl 832(<input_2=int64#3),>e=int64#8d # asm 2: movl 832(<input_2=%rdx),>e=%r10d movl 832( % rdx), % r10d # qhasm: p &= e # asm 1: and <e=int64#8,<p=int64#7 # asm 2: and <e=%r10,<p=%rax and % r10, % rax # qhasm: s ^= p # asm 1: xor <p=int64#7,<s=int64#6 # asm 2: xor <p=%rax,<s=%r9 xor % rax, % r9 # qhasm: c_all = count(s) # asm 1: popcnt <s=int64#6, >c_all=int64#6 # asm 2: popcnt <s=%r9, >c_all=%r9 popcnt % r9, % r9 # qhasm: b64 = mem64[ buf_ptr + 0 ] # asm 1: movq 0(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 0(<buf_ptr=%rcx),>b64=%rax movq 0( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 8 ] # asm 1: movq 8(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 8(<buf_ptr=%rcx),>b64=%rax movq 8( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 16 ] # asm 1: movq 16(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 16(<buf_ptr=%rcx),>b64=%rax movq 16( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: b64 = mem64[ buf_ptr + 24 ] # asm 1: movq 24(<buf_ptr=int64#4),>b64=int64#7 # asm 2: movq 24(<buf_ptr=%rcx),>b64=%rax movq 24( % rcx), % rax # qhasm: c = count(b64) # asm 1: popcnt <b64=int64#7, >c=int64#7 # asm 2: popcnt <b64=%rax, >c=%rax popcnt % rax, % rax # qhasm: c_all ^= c # asm 1: xor <c=int64#7,<c_all=int64#6 # asm 2: xor <c=%rax,<c_all=%r9 xor % rax, % r9 # qhasm: addr = row # asm 1: mov <row=int64#5,>addr=int64#7 # asm 2: mov <row=%r8,>addr=%rax mov % r8, % rax # qhasm: (uint64) addr >>= 3 # asm 1: shr $3,<addr=int64#7 # asm 2: shr $3,<addr=%rax shr $3, % rax # qhasm: addr += input_0 # asm 1: add <input_0=int64#1,<addr=int64#7 # asm 2: add <input_0=%rdi,<addr=%rax add % rdi, % rax # qhasm: synd = *(uint8 *) (addr + 0) # asm 1: movzbq 0(<addr=int64#7),>synd=int64#8 # asm 2: movzbq 0(<addr=%rax),>synd=%r10 movzbq 0( % rax), % r10 # qhasm: synd <<= 1 # asm 1: shl $1,<synd=int64#8 # asm 2: shl $1,<synd=%r10 shl $1, % r10 # qhasm: (uint32) c_all &= 1 # asm 1: and $1,<c_all=int64#6d # asm 2: and $1,<c_all=%r9d and $1, % r9d # qhasm: synd |= c_all # asm 1: or <c_all=int64#6,<synd=int64#8 # asm 2: or <c_all=%r9,<synd=%r10 or % r9, % r10 # qhasm: *(uint8 *) (addr + 0) = synd # asm 1: movb <synd=int64#8b,0(<addr=int64#7) # asm 2: movb <synd=%r10b,0(<addr=%rax) movb % r10b, 0( % rax) # qhasm: input_1 -= 628 # asm 1: sub $628,<input_1=int64#2 # asm 2: sub $628,<input_1=%rsi sub $628, % rsi # qhasm: =? 
row-0 # asm 1: cmp $0,<row=int64#5 # asm 2: cmp $0,<row=%r8 cmp $0, % r8 # comment:fp stack unchanged by jump # qhasm: goto loop if != jne ._loop # qhasm: ss = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 0(<input_0=%rdi),>ss=%ymm0 vmovupd 0( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 0(<input_2=%rdx),>ee=%ymm1 vmovupd 0( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 0 ] = ss # asm 1: vmovupd <ss=reg256#1,0(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,0(<input_0=%rdi) vmovupd % ymm0, 0( % rdi) # qhasm: ss = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 32(<input_0=%rdi),>ss=%ymm0 vmovupd 32( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 32 ] # asm 1: vmovupd 32(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 32(<input_2=%rdx),>ee=%ymm1 vmovupd 32( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 32 ] = ss # asm 1: vmovupd <ss=reg256#1,32(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,32(<input_0=%rdi) vmovupd % ymm0, 32( % rdi) # qhasm: ss = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 64(<input_0=%rdi),>ss=%ymm0 vmovupd 64( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 64 ] # asm 1: vmovupd 64(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 64(<input_2=%rdx),>ee=%ymm1 vmovupd 64( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 64 ] = ss # asm 1: vmovupd <ss=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: ss = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 96(<input_0=%rdi),>ss=%ymm0 vmovupd 96( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 96 ] # asm 1: vmovupd 96(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 96(<input_2=%rdx),>ee=%ymm1 vmovupd 96( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 96 ] = ss # asm 1: vmovupd <ss=reg256#1,96(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,96(<input_0=%rdi) vmovupd % ymm0, 96( % rdi) # qhasm: ss = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 128(<input_0=%rdi),>ss=%ymm0 vmovupd 128( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 128 ] # asm 1: vmovupd 128(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 128(<input_2=%rdx),>ee=%ymm1 vmovupd 128( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 128 ] = ss # asm 1: vmovupd <ss=reg256#1,128(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,128(<input_0=%rdi) vmovupd % ymm0, 128( % rdi) # qhasm: ss = mem256[ input_0 + 160 ] # asm 1: vmovupd 160(<input_0=int64#1),>ss=reg256#1 # asm 2: vmovupd 160(<input_0=%rdi),>ss=%ymm0 vmovupd 160( % rdi), % ymm0 # qhasm: ee = mem256[ input_2 + 160 ] # asm 1: vmovupd 160(<input_2=int64#3),>ee=reg256#2 # asm 2: vmovupd 
160(<input_2=%rdx),>ee=%ymm1 vmovupd 160( % rdx), % ymm1 # qhasm: ss ^= ee # asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1 # asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0 vpxor % ymm1, % ymm0, % ymm0 # qhasm: mem256[ input_0 + 160 ] = ss # asm 1: vmovupd <ss=reg256#1,160(<input_0=int64#1) # asm 2: vmovupd <ss=%ymm0,160(<input_0=%rdi) vmovupd % ymm0, 160( % rdi) # qhasm: s = mem64[ input_0 + 192 ] # asm 1: movq 192(<input_0=int64#1),>s=int64#2 # asm 2: movq 192(<input_0=%rdi),>s=%rsi movq 192( % rdi), % rsi # qhasm: e = mem64[ input_2 + 192 ] # asm 1: movq 192(<input_2=int64#3),>e=int64#4 # asm 2: movq 192(<input_2=%rdx),>e=%rcx movq 192( % rdx), % rcx # qhasm: s ^= e # asm 1: xor <e=int64#4,<s=int64#2 # asm 2: xor <e=%rcx,<s=%rsi xor % rcx, % rsi # qhasm: mem64[ input_0 + 192 ] = s # asm 1: movq <s=int64#2,192(<input_0=int64#1) # asm 2: movq <s=%rsi,192(<input_0=%rdi) movq % rsi, 192( % rdi) # qhasm: s = mem64[ input_0 + 200 ] # asm 1: movq 200(<input_0=int64#1),>s=int64#2 # asm 2: movq 200(<input_0=%rdi),>s=%rsi movq 200( % rdi), % rsi # qhasm: e = mem64[ input_2 + 200 ] # asm 1: movq 200(<input_2=int64#3),>e=int64#3 # asm 2: movq 200(<input_2=%rdx),>e=%rdx movq 200( % rdx), % rdx # qhasm: s ^= e # asm 1: xor <e=int64#3,<s=int64#2 # asm 2: xor <e=%rdx,<s=%rsi xor % rdx, % rsi # qhasm: mem64[ input_0 + 200 ] = s # asm 1: movq <s=int64#2,200(<input_0=int64#1) # asm 2: movq <s=%rsi,200(<input_0=%rdi) movq % rsi, 200( % rdi) # qhasm: return add % r11, % rsp ret
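Each pass of the ._loop above computes one syndrome bit: a public-key row is ANDed against the tail of the error vector (ymm lanes plus the 8/8/4-byte scalar tail), the result is XOR-reduced and its parity taken with popcnt, and the bit is shifted into the output byte at row/8. The block after the loop then XORs the leading bytes of e into s, accounting for the identity part of the systematic matrix H = [I | T]. A compact C sketch of the same structure, assuming the constants read off the assembly (1664 rows, 628 bytes per row, error-vector row part starting at byte 208) and a zero-initialized output; syndrome_ref and parity_byte are illustrative names, not the PQClean API:

#include <stddef.h>
#include <stdint.h>

#define PK_NROWS  1664
#define ROW_BYTES 628

static int parity_byte(uint8_t b)
{
    b ^= b >> 4; b ^= b >> 2; b ^= b >> 1;  /* fold 8 bits down to 1 */
    return b & 1;
}

static void syndrome_ref(uint8_t s[PK_NROWS / 8],
                         const uint8_t *pk_rows,  /* PK_NROWS rows, ROW_BYTES each */
                         const uint8_t *e)        /* error vector, identity block first */
{
    for (size_t r = 0; r < PK_NROWS; r++) {
        const uint8_t *row = pk_rows + r * ROW_BYTES;
        uint8_t acc = 0;

        /* the row multiplies the part of e beyond the identity block */
        for (size_t i = 0; i < ROW_BYTES; i++)
            acc ^= row[i] & e[PK_NROWS / 8 + i];

        s[r / 8] |= (uint8_t)(parity_byte(acc) << (r % 8));
    }

    /* identity part of H: s ^= first 208 bytes of e */
    for (size_t i = 0; i < PK_NROWS / 8; i++)
        s[i] ^= e[i];
}

The assembly walks the rows from index 1663 down to 0 with a negative 628-byte stride (input_1 starts at +1044364 = 628 * 1663), which yields the same bit packing as the forward loop sketched here.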
mktmansour/MKT-KSA-Geolocation-Security
254430
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128f/avx2/transpose_64x128_sp_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x128_sp_asm CRYPTO_NAMESPACE(transpose_64x128_sp_asm) #define _transpose_64x128_sp_asm _CRYPTO_NAMESPACE(transpose_64x128_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg128 x0 # qhasm: reg128 x1 # qhasm: reg128 x2 # qhasm: reg128 x3 # qhasm: reg128 x4 # qhasm: reg128 x5 # qhasm: reg128 x6 # qhasm: reg128 x7 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 v00 # qhasm: reg128 v01 # qhasm: reg128 v10 # qhasm: reg128 v11 # qhasm: reg128 mask0 # qhasm: reg128 mask1 # qhasm: reg128 mask2 # qhasm: reg128 mask3 # qhasm: reg128 mask4 # qhasm: reg128 mask5 # qhasm: enter transpose_64x128_sp_asm .p2align 5 .global _transpose_64x128_sp_asm .global transpose_64x128_sp_asm _transpose_64x128_sp_asm: transpose_64x128_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem128[ MASK5_0 ] # asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1 # asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0 movdqa MASK5_0( % rip), % xmm0 # qhasm: mask1 aligned= mem128[ MASK5_1 ] # asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2 # asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1 movdqa MASK5_1( % rip), % xmm1 # qhasm: mask2 aligned= mem128[ MASK4_0 ] # asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3 # asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2 movdqa MASK4_0( % rip), % xmm2 # qhasm: mask3 aligned= mem128[ MASK4_1 ] # asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4 # asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3 movdqa MASK4_1( % rip), % xmm3 # qhasm: mask4 aligned= mem128[ MASK3_0 ] # asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5 # asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4 movdqa MASK3_0( % rip), % xmm4 # qhasm: mask5 aligned= mem128[ MASK3_1 ] # asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6 # asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5 movdqa MASK3_1( % rip), % xmm5 # qhasm: x0 = mem128[ input_0 + 0 ] # asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6 movdqu 0( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 128 ] # asm 1: movdqu 128(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 128(<input_0=%rdi),>x1=%xmm7 movdqu 128( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 256 ] # asm 1: movdqu 
256(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 256(<input_0=%rdi),>x2=%xmm8 movdqu 256( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 384 ] # asm 1: movdqu 384(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 384(<input_0=%rdi),>x3=%xmm9 movdqu 384( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 512 ] # asm 1: movdqu 512(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 512(<input_0=%rdi),>x4=%xmm10 movdqu 512( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 640 ] # asm 1: movdqu 640(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 640(<input_0=%rdi),>x5=%xmm11 movdqu 640( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 768 ] # asm 1: movdqu 768(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 768(<input_0=%rdi),>x6=%xmm12 movdqu 768( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 896 ] # asm 1: movdqu 896(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 896(<input_0=%rdi),>x7=%xmm13 movdqu 896( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor 
<v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % 
xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw 
$8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 0 ] = x0 # asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi) movdqu % xmm9, 0( % rdi) # qhasm: mem128[ input_0 + 128 ] = x1 # asm 1: movdqu <x1=reg128#14,128(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,128(<input_0=%rdi) movdqu % xmm13, 128( % rdi) # qhasm: mem128[ input_0 + 256 ] = x2 # asm 1: movdqu <x2=reg128#15,256(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,256(<input_0=%rdi) movdqu % xmm14, 256( % rdi) # qhasm: mem128[ input_0 + 384 ] = x3 # asm 1: movdqu <x3=reg128#11,384(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,384(<input_0=%rdi) movdqu % xmm10, 384( % rdi) # qhasm: mem128[ input_0 + 512 ] = x4 # asm 1: movdqu <x4=reg128#12,512(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,512(<input_0=%rdi) movdqu % xmm11, 512( % rdi) # qhasm: mem128[ input_0 + 640 ] = x5 # asm 1: movdqu <x5=reg128#9,640(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,640(<input_0=%rdi) movdqu % xmm8, 640( % rdi) # qhasm: mem128[ input_0 + 768 ] = x6 # asm 1: movdqu <x6=reg128#13,768(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,768(<input_0=%rdi) movdqu % xmm12, 768( % rdi) # qhasm: mem128[ input_0 + 896 ] = x7 # asm 1: movdqu <x7=reg128#7,896(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,896(<input_0=%rdi) movdqu % xmm6, 896( % rdi) # qhasm: x0 = mem128[ input_0 + 16 ] # asm 1: movdqu 16(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 16(<input_0=%rdi),>x0=%xmm6 movdqu 16( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 144 ] # asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7 movdqu 144( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 272 ] # asm 1: movdqu 272(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 272(<input_0=%rdi),>x2=%xmm8 movdqu 272( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 400 ] # asm 1: movdqu 400(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 
400(<input_0=%rdi),>x3=%xmm9 movdqu 400( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 528 ] # asm 1: movdqu 528(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 528(<input_0=%rdi),>x4=%xmm10 movdqu 528( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 656 ] # asm 1: movdqu 656(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 656(<input_0=%rdi),>x5=%xmm11 movdqu 656( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 784 ] # asm 1: movdqu 784(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 784(<input_0=%rdi),>x6=%xmm12 movdqu 784( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 912 ] # asm 1: movdqu 912(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 912(<input_0=%rdi),>x7=%xmm13 movdqu 912( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % 
xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor 
<v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw 
$8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 16 ] = x0 # asm 1: movdqu <x0=reg128#10,16(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,16(<input_0=%rdi) movdqu % xmm9, 16( % rdi) # qhasm: mem128[ input_0 + 144 ] = x1 # asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi) movdqu % xmm13, 144( % rdi) # qhasm: mem128[ input_0 + 272 ] = x2 # asm 1: movdqu <x2=reg128#15,272(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,272(<input_0=%rdi) movdqu % xmm14, 272( % rdi) # qhasm: mem128[ input_0 + 400 ] = x3 # asm 1: movdqu <x3=reg128#11,400(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,400(<input_0=%rdi) movdqu % xmm10, 400( % rdi) # qhasm: mem128[ input_0 + 528 ] = x4 # asm 1: movdqu <x4=reg128#12,528(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,528(<input_0=%rdi) movdqu % xmm11, 528( % rdi) # qhasm: mem128[ input_0 + 656 ] = x5 # asm 1: movdqu <x5=reg128#9,656(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,656(<input_0=%rdi) movdqu % xmm8, 656( % rdi) # qhasm: mem128[ input_0 + 784 ] = x6 # asm 1: movdqu <x6=reg128#13,784(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,784(<input_0=%rdi) movdqu % xmm12, 784( % rdi) # qhasm: mem128[ input_0 + 912 ] = x7 # asm 1: movdqu <x7=reg128#7,912(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,912(<input_0=%rdi) movdqu % xmm6, 912( % rdi) # qhasm: x0 = mem128[ input_0 + 32 ] # asm 1: movdqu 32(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 32(<input_0=%rdi),>x0=%xmm6 movdqu 32( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 160 ] # asm 1: movdqu 160(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 160(<input_0=%rdi),>x1=%xmm7 movdqu 160( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 288 ] # asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8 movdqu 288( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 416 ] # asm 1: movdqu 416(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 416(<input_0=%rdi),>x3=%xmm9 movdqu 416( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 544 ] # asm 1: movdqu 544(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 544(<input_0=%rdi),>x4=%xmm10 movdqu 544( % rdi), % xmm10 # qhasm: x5 = 
mem128[ input_0 + 672 ] # asm 1: movdqu 672(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 672(<input_0=%rdi),>x5=%xmm11 movdqu 672( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 800 ] # asm 1: movdqu 800(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 800(<input_0=%rdi),>x6=%xmm12 movdqu 800( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 928 ] # asm 1: movdqu 928(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 928(<input_0=%rdi),>x7=%xmm13 movdqu 928( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq 
$32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor 
<v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: 4x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16 # asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15 vpslld $16, % xmm9, % xmm15 # qhasm: 4x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8 # asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7 vpsrld $16, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: 8x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16 # asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15 vpsllw $8, % xmm14, % xmm15 # qhasm: 8x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14 # asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13 vpsrlw $8, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: 8x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16 # asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15 vpsllw $8, % xmm10, % xmm15 # qhasm: 8x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12 # asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11 vpsrlw $8, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: 8x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16 # asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15 vpsllw $8, % xmm8, % xmm15 # qhasm: 8x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13 # asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12 vpsrlw $8, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: 
vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: 8x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16 # asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15 vpsllw $8, % xmm7, % xmm15 # qhasm: 8x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7 # asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6 vpsrlw $8, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 32 ] = x0 # asm 1: movdqu <x0=reg128#10,32(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,32(<input_0=%rdi) movdqu % xmm9, 32( % rdi) # qhasm: mem128[ input_0 + 160 ] = x1 # asm 1: movdqu <x1=reg128#14,160(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,160(<input_0=%rdi) movdqu % xmm13, 160( % rdi) # qhasm: mem128[ input_0 + 288 ] = x2 # asm 1: movdqu <x2=reg128#15,288(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,288(<input_0=%rdi) movdqu % xmm14, 288( % rdi) # qhasm: mem128[ input_0 + 416 ] = x3 # asm 1: movdqu <x3=reg128#11,416(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,416(<input_0=%rdi) movdqu % xmm10, 416( % rdi) # qhasm: mem128[ input_0 + 544 ] = x4 # asm 1: movdqu <x4=reg128#12,544(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,544(<input_0=%rdi) movdqu % xmm11, 544( % rdi) # qhasm: mem128[ input_0 + 672 ] = x5 # asm 1: movdqu <x5=reg128#9,672(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,672(<input_0=%rdi) movdqu % xmm8, 672( % rdi) # qhasm: mem128[ input_0 + 800 ] = x6 # asm 1: movdqu <x6=reg128#13,800(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,800(<input_0=%rdi) movdqu % xmm12, 800( % rdi) # qhasm: mem128[ input_0 + 928 ] = x7 # asm 1: movdqu <x7=reg128#7,928(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,928(<input_0=%rdi) movdqu % xmm6, 928( % rdi) # qhasm: x0 = mem128[ input_0 + 48 ] # asm 1: movdqu 48(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 48(<input_0=%rdi),>x0=%xmm6 movdqu 48( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 176 ] # asm 1: movdqu 176(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 176(<input_0=%rdi),>x1=%xmm7 movdqu 176( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 304 ] # asm 1: movdqu 304(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 304(<input_0=%rdi),>x2=%xmm8 movdqu 304( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 432 ] # asm 1: movdqu 432(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 432(<input_0=%rdi),>x3=%xmm9 movdqu 432( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 560 ] # asm 1: movdqu 560(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 560(<input_0=%rdi),>x4=%xmm10 movdqu 560( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 688 ] # asm 1: movdqu 688(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 688(<input_0=%rdi),>x5=%xmm11 movdqu 688( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 816 ] # asm 1: movdqu 
816(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 816(<input_0=%rdi),>x6=%xmm12 movdqu 816( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 944 ] # asm 1: movdqu 944(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 944(<input_0=%rdi),>x7=%xmm13 movdqu 944( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: 2x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16 # asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15 vpsllq $32, % xmm10, % xmm15 # qhasm: 2x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7 # asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6 vpsrlq $32, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: 2x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16 # asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15 vpsllq $32, % xmm11, % xmm15 # qhasm: 2x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8 # asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7 vpsrlq $32, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: 2x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16 # asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15 vpsllq $32, % xmm12, % xmm15 # qhasm: 2x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9 # asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8 vpsrlq $32, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: 2x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16 # asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15 vpsllq $32, % xmm13, % xmm15 # qhasm: 2x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10 # asm 2: vpsrlq 
$32,<x3=%xmm9,>v01=%xmm9 vpsrlq $32, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: 4x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16 # asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15 vpslld $16, % xmm11, % xmm15 # qhasm: 4x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15 # asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14 vpsrld $16, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: 4x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16 # asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15 vpslld $16, % xmm12, % xmm15 # qhasm: 4x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11 # asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10 vpsrld $16, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: 4x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16 # asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15 vpslld $16, % xmm8, % xmm15 # qhasm: 4x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7 # asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6 vpsrld $16, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % 
xmm7, %xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, %xmm14, %xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, %xmm13, %xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, %xmm10, %xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, %xmm8, %xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, %xmm12, %xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 48 ] = x0
# asm 1: movdqu <x0=reg128#10,48(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,48(<input_0=%rdi)
movdqu %xmm9, 48(%rdi)

# qhasm: mem128[ input_0 + 176 ] = x1
# asm 1: movdqu <x1=reg128#14,176(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,176(<input_0=%rdi)
movdqu %xmm13, 176(%rdi)

# qhasm: mem128[ input_0 + 304 ] = x2
# asm 1: movdqu <x2=reg128#15,304(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,304(<input_0=%rdi)
movdqu %xmm14, 304(%rdi)

# qhasm: mem128[ input_0 + 432 ] = x3
# asm 1: movdqu <x3=reg128#11,432(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,432(<input_0=%rdi)
movdqu %xmm10, 432(%rdi)

# qhasm: mem128[ input_0 + 560 ] = x4
# asm 1: movdqu <x4=reg128#12,560(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,560(<input_0=%rdi)
movdqu %xmm11, 560(%rdi)

# qhasm: mem128[ input_0 + 688 ] = x5
# asm 1: movdqu <x5=reg128#9,688(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,688(<input_0=%rdi)
movdqu %xmm8, 688(%rdi)

# qhasm: mem128[ input_0 + 816 ] = x6
# asm 1: movdqu <x6=reg128#13,816(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,816(<input_0=%rdi)
movdqu %xmm12, 816(%rdi)

# qhasm: mem128[ input_0 + 944 ] = x7
# asm 1: movdqu <x7=reg128#7,944(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,944(<input_0=%rdi)
movdqu %xmm6, 944(%rdi)

# qhasm: x0 = mem128[ input_0 + 64 ]
# asm 1: movdqu 64(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 64(<input_0=%rdi),>x0=%xmm6
movdqu 64(%rdi), %xmm6

# qhasm: x1 = mem128[ input_0 + 192 ]
# asm 1: movdqu 192(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 192(<input_0=%rdi),>x1=%xmm7
movdqu 192(%rdi), %xmm7

# qhasm: x2 = mem128[ input_0 + 320 ]
# asm 1: movdqu 320(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 320(<input_0=%rdi),>x2=%xmm8
movdqu 320(%rdi), %xmm8

# qhasm: x3 = mem128[ input_0 + 448 ]
# asm 1: movdqu 448(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 448(<input_0=%rdi),>x3=%xmm9
movdqu 448(%rdi), %xmm9

# qhasm: x4 = mem128[ input_0 + 576 ]
# asm 1: movdqu 576(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 576(<input_0=%rdi),>x4=%xmm10
movdqu 576(%rdi), %xmm10

# qhasm: x5 = mem128[ input_0 + 704 ]
# asm 1: movdqu 704(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 704(<input_0=%rdi),>x5=%xmm11
movdqu 704(%rdi), %xmm11

# qhasm: x6 = mem128[ input_0 + 832 ]
# asm 1: movdqu 832(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 832(<input_0=%rdi),>x6=%xmm12
movdqu 832(%rdi), %xmm12

# qhasm: x7 = mem128[ input_0 + 960 ]
# asm 1: movdqu 960(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 960(<input_0=%rdi),>x7=%xmm13
movdqu 960(%rdi), %xmm13
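# Note: the stages above and below realize the bit transpose as a butterfly
# network. At each level a register pair (xi, xj) exchanges the halves
# selected by a mask pair, using shift counts 32, 16 and 8 in this pass.
# A minimal sketch of one 32-bit level, using the names from the qhasm
# comments:
#
#   v00 = x0 & mask0      (bits of x0 that stay in x0)
#   v10 = x4 << 32        (bits of x4 that move into x0)
#   v01 = x0 >> 32        (bits of x0 that move into x4)
#   v11 = x4 & mask1      (bits of x4 that stay in x4)
#   x0  = v00 | v10
#   x4  = v01 | v11
#
# Each round transposes one 16-byte column slice: the rows x0..x7 lie 128
# bytes apart, and the slice offset advances by 16 bytes per round.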
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, %xmm10, %xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, %xmm6, %xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, %xmm11, %xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, %xmm7, %xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, %xmm12, %xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, %xmm8, %xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, %xmm13, %xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, %xmm11, %xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, %xmm14, %xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, %xmm12, %xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, %xmm8, %xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, %xmm14, %xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, %xmm13, %xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, %xmm10, %xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, %xmm8, %xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, %xmm12, %xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 64 ] = x0
# asm 1: movdqu <x0=reg128#10,64(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,64(<input_0=%rdi)
movdqu %xmm9, 64(%rdi)

# qhasm: mem128[ input_0 + 192 ] = x1
# asm 1: movdqu <x1=reg128#14,192(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,192(<input_0=%rdi)
movdqu %xmm13, 192(%rdi)

# qhasm: mem128[ input_0 + 320 ] = x2
# asm 1: movdqu <x2=reg128#15,320(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,320(<input_0=%rdi)
movdqu %xmm14, 320(%rdi)

# qhasm: mem128[ input_0 + 448 ] = x3
# asm 1: movdqu <x3=reg128#11,448(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,448(<input_0=%rdi)
movdqu %xmm10, 448(%rdi)

# qhasm: mem128[ input_0 + 576 ] = x4
# asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi)
movdqu %xmm11, 576(%rdi)

# qhasm: mem128[ input_0 + 704 ] = x5
# asm 1: movdqu <x5=reg128#9,704(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,704(<input_0=%rdi)
movdqu %xmm8, 704(%rdi)

# qhasm: mem128[ input_0 + 832 ] = x6
# asm 1: movdqu <x6=reg128#13,832(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,832(<input_0=%rdi)
movdqu %xmm12, 832(%rdi)

# qhasm: mem128[ input_0 + 960 ] = x7
# asm 1: movdqu <x7=reg128#7,960(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,960(<input_0=%rdi)
movdqu %xmm6, 960(%rdi)

# qhasm: x0 = mem128[ input_0 + 80 ]
# asm 1: movdqu 80(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 80(<input_0=%rdi),>x0=%xmm6
movdqu 80(%rdi), %xmm6

# qhasm: x1 = mem128[ input_0 + 208 ]
# asm 1: movdqu 208(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 208(<input_0=%rdi),>x1=%xmm7
movdqu 208(%rdi), %xmm7

# qhasm: x2 = mem128[ input_0 + 336 ]
# asm 1: movdqu 336(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 336(<input_0=%rdi),>x2=%xmm8
movdqu 336(%rdi), %xmm8

# qhasm: x3 = mem128[ input_0 + 464 ]
# asm 1: movdqu 464(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 464(<input_0=%rdi),>x3=%xmm9
movdqu 464(%rdi), %xmm9

# qhasm: x4 = mem128[ input_0 + 592 ]
# asm 1: movdqu 592(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 592(<input_0=%rdi),>x4=%xmm10
movdqu 592(%rdi), %xmm10

# qhasm: x5 = mem128[ input_0 + 720 ]
# asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11
movdqu 720(%rdi), %xmm11

# qhasm: x6 = mem128[ input_0 + 848 ]
# asm 1: movdqu 848(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 848(<input_0=%rdi),>x6=%xmm12
movdqu 848(%rdi), %xmm12

# qhasm: x7 = mem128[ input_0 + 976 ]
# asm 1: movdqu 976(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 976(<input_0=%rdi),>x7=%xmm13
movdqu 976(%rdi), %xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, %xmm10, %xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, %xmm6, %xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, %xmm11, %xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, %xmm7, %xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, %xmm12, %xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, %xmm8, %xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, %xmm13, %xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, %xmm11, %xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, %xmm14, %xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, %xmm12, %xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, %xmm8, %xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, %xmm14, %xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, %xmm13, %xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, %xmm10, %xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, %xmm8, %xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, %xmm12, %xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 80 ] = x0
# asm 1: movdqu <x0=reg128#10,80(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,80(<input_0=%rdi)
movdqu %xmm9, 80(%rdi)

# qhasm: mem128[ input_0 + 208 ] = x1
# asm 1: movdqu <x1=reg128#14,208(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,208(<input_0=%rdi)
movdqu %xmm13, 208(%rdi)

# qhasm: mem128[ input_0 + 336 ] = x2
# asm 1: movdqu <x2=reg128#15,336(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,336(<input_0=%rdi)
movdqu %xmm14, 336(%rdi)

# qhasm: mem128[ input_0 + 464 ] = x3
# asm 1: movdqu <x3=reg128#11,464(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,464(<input_0=%rdi)
movdqu %xmm10, 464(%rdi)

# qhasm: mem128[ input_0 + 592 ] = x4
# asm 1: movdqu <x4=reg128#12,592(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,592(<input_0=%rdi)
movdqu %xmm11, 592(%rdi)

# qhasm: mem128[ input_0 + 720 ] = x5
# asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi)
movdqu %xmm8, 720(%rdi)

# qhasm: mem128[ input_0 + 848 ] = x6
# asm 1: movdqu <x6=reg128#13,848(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,848(<input_0=%rdi)
movdqu %xmm12, 848(%rdi)

# qhasm: mem128[ input_0 + 976 ] = x7
# asm 1: movdqu <x7=reg128#7,976(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,976(<input_0=%rdi)
movdqu %xmm6, 976(%rdi)

# qhasm: x0 = mem128[ input_0 + 96 ]
# asm 1: movdqu 96(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 96(<input_0=%rdi),>x0=%xmm6
movdqu 96(%rdi), %xmm6

# qhasm: x1 = mem128[ input_0 + 224 ]
# asm 1: movdqu 224(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 224(<input_0=%rdi),>x1=%xmm7
movdqu 224(%rdi), %xmm7

# qhasm: x2 = mem128[ input_0 + 352 ]
# asm 1: movdqu 352(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 352(<input_0=%rdi),>x2=%xmm8
movdqu 352(%rdi), %xmm8

# qhasm: x3 = mem128[ input_0 + 480 ]
# asm 1: movdqu 480(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 480(<input_0=%rdi),>x3=%xmm9
movdqu 480(%rdi), %xmm9

# qhasm: x4 = mem128[ input_0 + 608 ]
# asm 1: movdqu 608(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 608(<input_0=%rdi),>x4=%xmm10
movdqu 608(%rdi), %xmm10

# qhasm: x5 = mem128[ input_0 + 736 ]
# asm 1: movdqu 736(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 736(<input_0=%rdi),>x5=%xmm11
movdqu 736(%rdi), %xmm11

# qhasm: x6 = mem128[ input_0 + 864 ]
# asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12
movdqu 864(%rdi), %xmm12

# qhasm: x7 = mem128[ input_0 + 992 ]
# asm 1: movdqu 992(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 992(<input_0=%rdi),>x7=%xmm13
movdqu 992(%rdi), %xmm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, %xmm10, %xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, %xmm6, %xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, %xmm11, %xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, %xmm7, %xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, %xmm12, %xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, %xmm8, %xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32, %xmm13, %xmm15

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16, %xmm11, %xmm15

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16, %xmm14, %xmm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16, %xmm12, %xmm15

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16, %xmm8, %xmm15

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8, %xmm14, %xmm15

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8, %xmm13, %xmm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8, %xmm10, %xmm15

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8, %xmm8, %xmm15

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8, %xmm12, %xmm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6

# qhasm: mem128[ input_0 + 96 ] = x0
# asm 1: movdqu <x0=reg128#10,96(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,96(<input_0=%rdi)
movdqu %xmm9, 96(%rdi)

# qhasm: mem128[ input_0 + 224 ] = x1
# asm 1: movdqu <x1=reg128#14,224(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,224(<input_0=%rdi)
movdqu %xmm13, 224(%rdi)

# qhasm: mem128[ input_0 + 352 ] = x2
# asm 1: movdqu <x2=reg128#15,352(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,352(<input_0=%rdi)
movdqu %xmm14, 352(%rdi)

# qhasm: mem128[ input_0 + 480 ] = x3
# asm 1: movdqu <x3=reg128#11,480(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,480(<input_0=%rdi)
movdqu %xmm10, 480(%rdi)

# qhasm: mem128[ input_0 + 608 ] = x4
# asm 1: movdqu <x4=reg128#12,608(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,608(<input_0=%rdi)
movdqu %xmm11, 608(%rdi)

# qhasm: mem128[ input_0 + 736 ] = x5
# asm 1: movdqu <x5=reg128#9,736(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,736(<input_0=%rdi)
movdqu %xmm8, 736(%rdi)

# qhasm: mem128[ input_0 + 864 ] = x6
# asm 1: movdqu <x6=reg128#13,864(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi)
movdqu %xmm12, 864(%rdi)

# qhasm: mem128[ input_0 + 992 ] = x7
# asm 1: movdqu <x7=reg128#7,992(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,992(<input_0=%rdi)
movdqu %xmm6, 992(%rdi)

# qhasm: x0 = mem128[ input_0 + 112 ]
# asm 1: movdqu 112(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 112(<input_0=%rdi),>x0=%xmm6
movdqu 112(%rdi), %xmm6

# qhasm: x1 = mem128[ input_0 + 240 ]
# asm 1: movdqu 240(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 240(<input_0=%rdi),>x1=%xmm7
movdqu 240(%rdi), %xmm7

# qhasm: x2 = mem128[ input_0 + 368 ]
# asm 1: movdqu 368(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 368(<input_0=%rdi),>x2=%xmm8
movdqu 368(%rdi), %xmm8

# qhasm: x3 = mem128[ input_0 + 496 ]
# asm 1: movdqu 496(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 496(<input_0=%rdi),>x3=%xmm9
movdqu 496(%rdi), %xmm9

# qhasm: x4 = mem128[ input_0 + 624 ]
# asm 1: movdqu 624(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 624(<input_0=%rdi),>x4=%xmm10
movdqu 624(%rdi), %xmm10

# qhasm: x5 = mem128[ input_0 + 752 ]
# asm 1: movdqu 752(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 752(<input_0=%rdi),>x5=%xmm11
movdqu 752(%rdi), %xmm11

# qhasm: x6 = mem128[ input_0 + 880 ]
# asm 1: movdqu 880(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 880(<input_0=%rdi),>x6=%xmm12
movdqu 880(%rdi), %xmm12

# qhasm: x7 = mem128[ input_0 + 1008 ]
# asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13
movdqu 1008(%rdi), %xmm13
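# Final 16-byte slice of this pass (offsets 112, 240, ..., 1008). From here
# the register allocator also reuses the mask registers %xmm0-%xmm5 as
# scratch, which is possible because all six masks are reloaded for the
# next pass immediately after this round.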
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14

# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32, %xmm10, %xmm15

# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32, %xmm6, %xmm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10

# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32, %xmm11, %xmm15

# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32, %xmm7, %xmm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11

# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32, %xmm12, %xmm15

# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32, %xmm8, %xmm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#1
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm0
vpand %xmm0, %xmm9, %xmm0

# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#13
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm12
vpsllq $32, %xmm13, %xmm12

# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1
vpand %xmm1, %xmm13, %xmm1

# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#1,>x3=reg128#1
# asm 2: vpor <v10=%xmm12,<v00=%xmm0,>x3=%xmm0
vpor %xmm12, %xmm0, %xmm0

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1
vpor %xmm1, %xmm9, %xmm1

# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9
vpand %xmm2, %xmm14, %xmm9

# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#13
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm12
vpslld $16, %xmm11, %xmm12

# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#14
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm13
vpsrld $16, %xmm14, %xmm13

# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9
vpor %xmm12, %xmm9, %xmm9

# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11
vpor %xmm11, %xmm13, %xmm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12
vpand %xmm2, %xmm10, %xmm12

# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#1,>v10=reg128#14
# asm 2: vpslld $16,<x3=%xmm0,>v10=%xmm13
vpslld $16, %xmm0, %xmm13

# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0
vpand %xmm3, %xmm0, %xmm0

# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13
# asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12
vpor %xmm13, %xmm12, %xmm12

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0
vpor %xmm0, %xmm10, %xmm0

# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10
vpand %xmm2, %xmm6, %xmm10

# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#14
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm13
vpslld $16, %xmm8, %xmm13

# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11
# asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10
vpor %xmm13, %xmm10, %xmm10

# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#3
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm2
vpand %xmm2, %xmm7, %xmm2

# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#2,>v10=reg128#9
# asm 2: vpslld $16,<x7=%xmm1,>v10=%xmm8
vpslld $16, %xmm1, %xmm8

# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1
vpand %xmm3, %xmm1, %xmm1

# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#9,<v00=reg128#3,>x5=reg128#3
# asm 2: vpor <v10=%xmm8,<v00=%xmm2,>x5=%xmm2
vpor %xmm8, %xmm2, %xmm2

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1
vpor %xmm1, %xmm7, %xmm1

# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4
# asm 2: vpand <mask4=%xmm4,<x0=%xmm9,>v00=%xmm3
vpand %xmm4, %xmm9, %xmm3

# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#13,>v10=reg128#8
# asm 2: vpsllw $8,<x1=%xmm12,>v10=%xmm7
vpsllw $8, %xmm12, %xmm7

# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#10,>v01=reg128#9
# asm 2: vpsrlw $8,<x0=%xmm9,>v01=%xmm8
vpsrlw $8, %xmm9, %xmm8

# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10
# asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9
vpand %xmm5, %xmm12, %xmm9

# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4
# asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3
vpor %xmm7, %xmm3, %xmm3

# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7
vpor %xmm9, %xmm8, %xmm7

# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8
vpand %xmm4, %xmm11, %xmm8

# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#1,>v10=reg128#10
# asm 2: vpsllw $8,<x3=%xmm0,>v10=%xmm9
vpsllw $8, %xmm0, %xmm9

# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0
vpand %xmm5, %xmm0, %xmm0

# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9
# asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8
vpor %xmm9, %xmm8, %xmm8

# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0
vpor %xmm0, %xmm11, %xmm0

# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9
vpand %xmm4, %xmm10, %xmm9

# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#3,>v10=reg128#12
# asm 2: vpsllw $8,<x5=%xmm2,>v10=%xmm11
vpsllw $8, %xmm2, %xmm11

# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#11,>v01=reg128#11
# asm 2: vpsrlw $8,<x4=%xmm10,>v01=%xmm10
vpsrlw $8, %xmm10, %xmm10

# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3
# asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2
vpand %xmm5, %xmm2, %xmm2

# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10
# asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9
vpor %xmm11, %xmm9, %xmm9

# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3
# asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2
vpor %xmm2, %xmm10, %xmm2

# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#5
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm4
vpand %xmm4, %xmm6, %xmm4

# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#2,>v10=reg128#11
# asm 2: vpsllw $8,<x7=%xmm1,>v10=%xmm10
vpsllw $8, %xmm1, %xmm10

# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1
vpand %xmm5, %xmm1, %xmm1

# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#11,<v00=reg128#5,>x6=reg128#5
# asm 2: vpor <v10=%xmm10,<v00=%xmm4,>x6=%xmm4
vpor %xmm10, %xmm4, %xmm4

# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm6,>x7=%xmm1
vpor %xmm1, %xmm6, %xmm1

# qhasm: mem128[ input_0 + 112 ] = x0
# asm 1: movdqu <x0=reg128#4,112(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm3,112(<input_0=%rdi)
movdqu %xmm3, 112(%rdi)

# qhasm: mem128[ input_0 + 240 ] = x1
# asm 1: movdqu <x1=reg128#8,240(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm7,240(<input_0=%rdi)
movdqu %xmm7, 240(%rdi)

# qhasm: mem128[ input_0 + 368 ] = x2
# asm 1: movdqu <x2=reg128#9,368(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm8,368(<input_0=%rdi)
movdqu %xmm8, 368(%rdi)

# qhasm: mem128[ input_0 + 496 ] = x3
# asm 1: movdqu <x3=reg128#1,496(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm0,496(<input_0=%rdi)
movdqu %xmm0, 496(%rdi)

# qhasm: mem128[ input_0 + 624 ] = x4
# asm 1: movdqu <x4=reg128#10,624(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm9,624(<input_0=%rdi)
movdqu %xmm9, 624(%rdi)

# qhasm: mem128[ input_0 + 752 ] = x5
# asm 1: movdqu <x5=reg128#3,752(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm2,752(<input_0=%rdi)
movdqu %xmm2, 752(%rdi)

# qhasm: mem128[ input_0 + 880 ] = x6
# asm 1: movdqu <x6=reg128#5,880(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm4,880(<input_0=%rdi)
movdqu %xmm4, 880(%rdi)

# qhasm: mem128[ input_0 + 1008 ] = x7
# asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi)
movdqu %xmm1, 1008(%rdi)
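# Second pass: mask0/mask1 now select 4-bit nibbles (MASK2_*), mask2/mask3
# select 2-bit pairs (MASK1_*), and mask4/mask5 select single bits (MASK0_*);
# the same butterfly pattern is repeated with shift counts 4, 2 and 1 to
# finish the transpose within each byte.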
112(<input_0=%rdi),>x7=%xmm13 movdqu 112( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % 
xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 
vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: 
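
# Note: the $4, $2 and $1 rounds around this point form the usual
# mask-shift-or transpose butterfly: the $4 round exchanges nibbles
# between rows four apart (x0/x4, x1/x5, x2/x6, x3/x7), the $2 round
# exchanges bit pairs between rows two apart, and the $1 round
# exchanges single bits between adjacent rows.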
vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 0 ] = x0 # asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi) movdqu % xmm9, 0( % rdi) # qhasm: mem128[ input_0 + 16 ] = x1 # asm 1: movdqu <x1=reg128#14,16(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,16(<input_0=%rdi) movdqu % xmm13, 16( % rdi) # qhasm: mem128[ input_0 + 32 ] = x2 # asm 1: movdqu <x2=reg128#15,32(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,32(<input_0=%rdi) movdqu % xmm14, 32( % rdi) # qhasm: mem128[ input_0 + 48 ] = x3 # asm 1: movdqu <x3=reg128#11,48(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,48(<input_0=%rdi) movdqu % xmm10, 48( % rdi) # qhasm: mem128[ input_0 + 64 ] = x4 # asm 1: movdqu <x4=reg128#12,64(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,64(<input_0=%rdi) movdqu % xmm11, 64( % rdi) # qhasm: mem128[ input_0 + 80 ] = x5 # asm 1: movdqu <x5=reg128#9,80(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,80(<input_0=%rdi) movdqu % xmm8, 80( % rdi) # qhasm: mem128[ input_0 + 96 ] = x6 # asm 1: movdqu <x6=reg128#13,96(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,96(<input_0=%rdi) movdqu % xmm12, 96( % rdi) # qhasm: mem128[ input_0 + 112 ] = x7 # asm 1: movdqu <x7=reg128#7,112(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,112(<input_0=%rdi) movdqu % xmm6, 112( % rdi) # qhasm: x0 = mem128[ input_0 + 128 ] # asm 1: movdqu 128(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 128(<input_0=%rdi),>x0=%xmm6 movdqu 128( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 144 ] # asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7 movdqu 144( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 160 ] # asm 1: movdqu 160(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 160(<input_0=%rdi),>x2=%xmm8 movdqu 160( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 176 ] # asm 1: movdqu 176(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 176(<input_0=%rdi),>x3=%xmm9 movdqu 176( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 192 ] # asm 1: movdqu 192(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 192(<input_0=%rdi),>x4=%xmm10 movdqu 192( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 208 ] # asm 1: movdqu 208(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 208(<input_0=%rdi),>x5=%xmm11 movdqu 208( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 224 ] # asm 1: movdqu 224(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 224(<input_0=%rdi),>x6=%xmm12 movdqu 224( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 240 ] # asm 1: movdqu 240(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 240(<input_0=%rdi),>x7=%xmm13 movdqu 240( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, 
% xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand 
<mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand 
<mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & 
mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 128 ] = x0 # asm 1: movdqu <x0=reg128#10,128(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,128(<input_0=%rdi) movdqu % xmm9, 128( % rdi) # qhasm: mem128[ input_0 + 144 ] = x1 # asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi) movdqu % xmm13, 144( % rdi) # qhasm: mem128[ input_0 + 160 ] = x2 # asm 1: movdqu <x2=reg128#15,160(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,160(<input_0=%rdi) movdqu % xmm14, 160( % rdi) # qhasm: mem128[ input_0 + 176 ] = x3 # asm 1: movdqu <x3=reg128#11,176(<input_0=int64#1) # asm 2: movdqu 
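
# Note: the load / three exchange rounds / store sequence repeats
# unchanged for every 128-byte slice of the buffer; the stores around
# this point finish the slice at offset 128 before the slice at offset
# 256 is loaded.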
<x3=%xmm10,176(<input_0=%rdi) movdqu % xmm10, 176( % rdi) # qhasm: mem128[ input_0 + 192 ] = x4 # asm 1: movdqu <x4=reg128#12,192(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,192(<input_0=%rdi) movdqu % xmm11, 192( % rdi) # qhasm: mem128[ input_0 + 208 ] = x5 # asm 1: movdqu <x5=reg128#9,208(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,208(<input_0=%rdi) movdqu % xmm8, 208( % rdi) # qhasm: mem128[ input_0 + 224 ] = x6 # asm 1: movdqu <x6=reg128#13,224(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,224(<input_0=%rdi) movdqu % xmm12, 224( % rdi) # qhasm: mem128[ input_0 + 240 ] = x7 # asm 1: movdqu <x7=reg128#7,240(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,240(<input_0=%rdi) movdqu % xmm6, 240( % rdi) # qhasm: x0 = mem128[ input_0 + 256 ] # asm 1: movdqu 256(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 256(<input_0=%rdi),>x0=%xmm6 movdqu 256( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 272 ] # asm 1: movdqu 272(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 272(<input_0=%rdi),>x1=%xmm7 movdqu 272( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 288 ] # asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8 movdqu 288( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 304 ] # asm 1: movdqu 304(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 304(<input_0=%rdi),>x3=%xmm9 movdqu 304( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 320 ] # asm 1: movdqu 320(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 320(<input_0=%rdi),>x4=%xmm10 movdqu 320( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 336 ] # asm 1: movdqu 336(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 336(<input_0=%rdi),>x5=%xmm11 movdqu 336( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 352 ] # asm 1: movdqu 352(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 352(<input_0=%rdi),>x6=%xmm12 movdqu 352( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 368 ] # asm 1: movdqu 368(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 368(<input_0=%rdi),>x7=%xmm13 movdqu 368( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 
4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 # asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % 
xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12 # asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11 vpor % xmm11, % xmm14, % xmm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15 # asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14 vpand % xmm2, % xmm10, % xmm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15 vpand % xmm2, % xmm12, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11 # asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10 vpand % xmm3, % xmm10, % xmm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13 # asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12 vpand % xmm3, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#11 # asm 2: psrlq $2,<v01=%xmm10 psrlq $2, % xmm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11 # asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10 vpor % xmm12, % xmm10, % xmm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13 # asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12 vpand % xmm2, % xmm6, % xmm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15 vpand % xmm2, % xmm8, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7 # asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6 vpand % xmm3, % xmm6, % xmm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9 # asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8 vpand % xmm3, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#7 # asm 2: psrlq $2,<v01=%xmm6 psrlq $2, % xmm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7 # asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6 vpor % xmm8, % xmm6, % xmm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9 # asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8 vpand % xmm2, % xmm7, % xmm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16 # asm 2: vpand 
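
# Note: $2 round (bit-pair exchange) for the slice at offset 256.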
<mask2=%xmm2,<x7=%xmm9,>v10=%xmm15 vpand % xmm2, % xmm9, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8 # asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7 vpand % xmm3, % xmm7, % xmm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10 # asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9 vpand % xmm3, % xmm9, % xmm9 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9 # asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8 vpor % xmm15, % xmm8, % xmm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7 vpor % xmm9, % xmm7, % xmm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9 vpand % xmm4, % xmm13, % xmm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15 vpand % xmm4, % xmm14, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14 # asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13 vpand % xmm5, % xmm13, % xmm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15 # asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14 vpand % xmm5, % xmm14, % xmm14 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#14 # asm 2: psrlq $1,<v01=%xmm13 psrlq $1, % xmm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10 # asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9 vpor % xmm15, % xmm9, % xmm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14 # asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13 vpor % xmm14, % xmm13, % xmm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14 vpand % xmm4, % xmm11, % xmm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15 vpand % xmm4, % xmm10, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11 # asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11 # asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10 vpor % xmm10, % xmm11, % xmm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12 # asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11 vpand % xmm4, % xmm12, % xmm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand 
<mask4=reg128#5,<x5=reg128#9,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15 vpand % xmm4, % xmm8, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13 # asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12 vpand % xmm5, % xmm12, % xmm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9 # asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8 vpand % xmm5, % xmm8, % xmm8 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#13 # asm 2: psrlq $1,<v01=%xmm12 psrlq $1, % xmm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9 # asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8 vpor % xmm8, % xmm12, % xmm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12 vpand % xmm4, % xmm6, % xmm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16 # asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15 vpand % xmm4, % xmm7, % xmm15 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#16 # asm 2: psllq $1,<v10=%xmm15 psllq $1, % xmm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8 # asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7 vpand % xmm5, % xmm7, % xmm7 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7 # asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6 vpor % xmm7, % xmm6, % xmm6 # qhasm: mem128[ input_0 + 256 ] = x0 # asm 1: movdqu <x0=reg128#10,256(<input_0=int64#1) # asm 2: movdqu <x0=%xmm9,256(<input_0=%rdi) movdqu % xmm9, 256( % rdi) # qhasm: mem128[ input_0 + 272 ] = x1 # asm 1: movdqu <x1=reg128#14,272(<input_0=int64#1) # asm 2: movdqu <x1=%xmm13,272(<input_0=%rdi) movdqu % xmm13, 272( % rdi) # qhasm: mem128[ input_0 + 288 ] = x2 # asm 1: movdqu <x2=reg128#15,288(<input_0=int64#1) # asm 2: movdqu <x2=%xmm14,288(<input_0=%rdi) movdqu % xmm14, 288( % rdi) # qhasm: mem128[ input_0 + 304 ] = x3 # asm 1: movdqu <x3=reg128#11,304(<input_0=int64#1) # asm 2: movdqu <x3=%xmm10,304(<input_0=%rdi) movdqu % xmm10, 304( % rdi) # qhasm: mem128[ input_0 + 320 ] = x4 # asm 1: movdqu <x4=reg128#12,320(<input_0=int64#1) # asm 2: movdqu <x4=%xmm11,320(<input_0=%rdi) movdqu % xmm11, 320( % rdi) # qhasm: mem128[ input_0 + 336 ] = x5 # asm 1: movdqu <x5=reg128#9,336(<input_0=int64#1) # asm 2: movdqu <x5=%xmm8,336(<input_0=%rdi) movdqu % xmm8, 336( % rdi) # qhasm: mem128[ input_0 + 352 ] = x6 # asm 1: movdqu <x6=reg128#13,352(<input_0=int64#1) # asm 2: movdqu <x6=%xmm12,352(<input_0=%rdi) movdqu % xmm12, 352( % rdi) # qhasm: mem128[ input_0 + 368 ] = x7 # asm 1: movdqu <x7=reg128#7,368(<input_0=int64#1) # asm 2: movdqu <x7=%xmm6,368(<input_0=%rdi) movdqu % xmm6, 368( % rdi) # qhasm: x0 = mem128[ input_0 + 384 ] # asm 1: movdqu 
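
# Note: the slice at offset 256 has been stored; the movdqu loads of
# offsets 384..496 beginning here start the same three exchange rounds
# for the next slice.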
384(<input_0=int64#1),>x0=reg128#7 # asm 2: movdqu 384(<input_0=%rdi),>x0=%xmm6 movdqu 384( % rdi), % xmm6 # qhasm: x1 = mem128[ input_0 + 400 ] # asm 1: movdqu 400(<input_0=int64#1),>x1=reg128#8 # asm 2: movdqu 400(<input_0=%rdi),>x1=%xmm7 movdqu 400( % rdi), % xmm7 # qhasm: x2 = mem128[ input_0 + 416 ] # asm 1: movdqu 416(<input_0=int64#1),>x2=reg128#9 # asm 2: movdqu 416(<input_0=%rdi),>x2=%xmm8 movdqu 416( % rdi), % xmm8 # qhasm: x3 = mem128[ input_0 + 432 ] # asm 1: movdqu 432(<input_0=int64#1),>x3=reg128#10 # asm 2: movdqu 432(<input_0=%rdi),>x3=%xmm9 movdqu 432( % rdi), % xmm9 # qhasm: x4 = mem128[ input_0 + 448 ] # asm 1: movdqu 448(<input_0=int64#1),>x4=reg128#11 # asm 2: movdqu 448(<input_0=%rdi),>x4=%xmm10 movdqu 448( % rdi), % xmm10 # qhasm: x5 = mem128[ input_0 + 464 ] # asm 1: movdqu 464(<input_0=int64#1),>x5=reg128#12 # asm 2: movdqu 464(<input_0=%rdi),>x5=%xmm11 movdqu 464( % rdi), % xmm11 # qhasm: x6 = mem128[ input_0 + 480 ] # asm 1: movdqu 480(<input_0=int64#1),>x6=reg128#13 # asm 2: movdqu 480(<input_0=%rdi),>x6=%xmm12 movdqu 480( % rdi), % xmm12 # qhasm: x7 = mem128[ input_0 + 496 ] # asm 1: movdqu 496(<input_0=int64#1),>x7=reg128#14 # asm 2: movdqu 496(<input_0=%rdi),>x7=%xmm13 movdqu 496( % rdi), % xmm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15 # asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14 vpand % xmm0, % xmm6, % xmm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15 vpand % xmm0, % xmm10, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7 # asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6 vpand % xmm1, % xmm6, % xmm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11 # asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10 vpand % xmm1, % xmm10, % xmm10 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#7 # asm 2: psrlq $4,<v01=%xmm6 psrlq $4, % xmm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15 # asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14 vpor % xmm15, % xmm14, % xmm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7 # asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6 vpor % xmm10, % xmm6, % xmm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11 # asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10 vpand % xmm0, % xmm7, % xmm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15 vpand % xmm0, % xmm11, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8 # asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7 vpand % xmm1, % xmm7, % xmm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12 # asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11 vpand % xmm1, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#8 # asm 2: psrlq $4,<v01=%xmm7 psrlq $4, % xmm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11 # asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10 vpor % xmm15, % xmm10, % xmm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8 
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7 vpor % xmm11, % xmm7, % xmm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12 # asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11 vpand % xmm0, % xmm8, % xmm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15 vpand % xmm0, % xmm12, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9 # asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8 vpand % xmm1, % xmm8, % xmm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13 # asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12 vpand % xmm1, % xmm12, % xmm12 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#9 # asm 2: psrlq $4,<v01=%xmm8 psrlq $4, % xmm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12 # asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11 vpor % xmm15, % xmm11, % xmm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9 # asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8 vpor % xmm12, % xmm8, % xmm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13 # asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12 vpand % xmm0, % xmm9, % xmm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16 # asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15 vpand % xmm0, % xmm13, % xmm15 # qhasm: 2x v10 <<= 4 # asm 1: psllq $4,<v10=reg128#16 # asm 2: psllq $4,<v10=%xmm15 psllq $4, % xmm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10 # asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9 vpand % xmm1, % xmm9, % xmm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14 # asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13 vpand % xmm1, % xmm13, % xmm13 # qhasm: 2x v01 unsigned>>= 4 # asm 1: psrlq $4,<v01=reg128#10 # asm 2: psrlq $4,<v01=%xmm9 psrlq $4, % xmm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13 # asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12 vpor % xmm15, % xmm12, % xmm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10 # asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9 vpor % xmm13, % xmm9, % xmm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14 # asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13 vpand % xmm2, % xmm14, % xmm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16 # asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15 vpand % xmm2, % xmm11, % xmm15 # qhasm: 2x v10 <<= 2 # asm 1: psllq $2,<v10=reg128#16 # asm 2: psllq $2,<v10=%xmm15 psllq $2, % xmm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15 # asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14 vpand % xmm3, % xmm14, % xmm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12 # asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11 vpand % xmm3, % xmm11, % xmm11 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#15 # asm 2: psrlq $2,<v01=%xmm14 psrlq $2, % xmm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14 # asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13 vpor % xmm15, % xmm13, % xmm13 # qhasm: x2 = v01 | v11 # asm 1: vpor 
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = x1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = x4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = x0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = x2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = x4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = x6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6
# qhasm: mem128[ input_0 + 384 ] = x0
movdqu %xmm9,384(%rdi)
# qhasm: mem128[ input_0 + 400 ] = x1
movdqu %xmm13,400(%rdi)
# qhasm: mem128[ input_0 + 416 ] = x2
movdqu %xmm14,416(%rdi)
# qhasm: mem128[ input_0 + 432 ] = x3
movdqu %xmm10,432(%rdi)
# qhasm: mem128[ input_0 + 448 ] = x4
movdqu %xmm11,448(%rdi)
# qhasm: mem128[ input_0 + 464 ] = x5
movdqu %xmm8,464(%rdi)
# qhasm: mem128[ input_0 + 480 ] = x6
movdqu %xmm12,480(%rdi)
# qhasm: mem128[ input_0 + 496 ] = x7
movdqu %xmm6,496(%rdi)
# qhasm: x0 = mem128[ input_0 + 512 ]
movdqu 512(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 528 ]
movdqu 528(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 544 ]
movdqu 544(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 560 ]
movdqu 560(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 576 ]
movdqu 576(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 592 ]
movdqu 592(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 608 ]
movdqu 608(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 624 ]
movdqu 624(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0,%xmm13,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x3 & mask1
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1,%xmm13,%xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm9
# qhasm: x3 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
vpor %xmm13,%xmm9,%xmm9
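
# Second pass for this block: 2-bit exchange under mask2/mask3.
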
# qhasm: v00 = x0 & mask2
vpand %xmm2,%xmm14,%xmm13
# qhasm: v10 = x2 & mask2
vpand %xmm2,%xmm11,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3,%xmm14,%xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15,%xmm13,%xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = x1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = x4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7
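
# Third pass for this block: 1-bit exchange under mask4/mask5.
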
# qhasm: v00 = x0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = x2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = x4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = x6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6
# qhasm: mem128[ input_0 + 512 ] = x0
movdqu %xmm9,512(%rdi)
# qhasm: mem128[ input_0 + 528 ] = x1
movdqu %xmm13,528(%rdi)
# qhasm: mem128[ input_0 + 544 ] = x2
movdqu %xmm14,544(%rdi)
# qhasm: mem128[ input_0 + 560 ] = x3
movdqu %xmm10,560(%rdi)
# qhasm: mem128[ input_0 + 576 ] = x4
movdqu %xmm11,576(%rdi)
# qhasm: mem128[ input_0 + 592 ] = x5
movdqu %xmm8,592(%rdi)
# qhasm: mem128[ input_0 + 608 ] = x6
movdqu %xmm12,608(%rdi)
# qhasm: mem128[ input_0 + 624 ] = x7
movdqu %xmm6,624(%rdi)
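
# Next block: same load / three exchange passes / store sequence for
# the rows at offsets 640..752.
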
# qhasm: x0 = mem128[ input_0 + 640 ]
movdqu 640(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 656 ]
movdqu 656(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 672 ]
movdqu 672(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 688 ]
movdqu 688(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 704 ]
movdqu 704(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 720 ]
movdqu 720(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 736 ]
movdqu 736(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 752 ]
movdqu 752(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0,%xmm13,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x3 & mask1
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1,%xmm13,%xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm9
# qhasm: x3 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = x0 & mask2
vpand %xmm2,%xmm14,%xmm13
# qhasm: v10 = x2 & mask2
vpand %xmm2,%xmm11,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3,%xmm14,%xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15,%xmm13,%xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = x1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = x4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = x0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = x2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = x4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = x6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6
# qhasm: mem128[ input_0 + 640 ] = x0
movdqu %xmm9,640(%rdi)
# qhasm: mem128[ input_0 + 656 ] = x1
movdqu %xmm13,656(%rdi)
# qhasm: mem128[ input_0 + 672 ] = x2
movdqu %xmm14,672(%rdi)
# qhasm: mem128[ input_0 + 688 ] = x3
movdqu %xmm10,688(%rdi)
# qhasm: mem128[ input_0 + 704 ] = x4
movdqu %xmm11,704(%rdi)
# qhasm: mem128[ input_0 + 720 ] = x5
movdqu %xmm8,720(%rdi)
# qhasm: mem128[ input_0 + 736 ] = x6
movdqu %xmm12,736(%rdi)
# qhasm: mem128[ input_0 + 752 ] = x7
movdqu %xmm6,752(%rdi)
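
# Block at offset 768: load, run the 4/2/1-bit exchange passes, and
# store the rows back in place.
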
# qhasm: x0 = mem128[ input_0 + 768 ]
movdqu 768(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 784 ]
movdqu 784(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 800 ]
movdqu 800(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 816 ]
movdqu 816(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 832 ]
movdqu 832(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 848 ]
movdqu 848(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 864 ]
movdqu 864(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 880 ]
movdqu 880(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0,%xmm13,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x3 & mask1
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1,%xmm13,%xmm13
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm9
# qhasm: x3 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = x0 & mask2
vpand %xmm2,%xmm14,%xmm13
# qhasm: v10 = x2 & mask2
vpand %xmm2,%xmm11,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x0 & mask3
vpand %xmm3,%xmm14,%xmm14
# qhasm: v11 = x2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm14
# qhasm: x0 = v00 | v10
vpor %xmm15,%xmm13,%xmm13
# qhasm: x2 = v01 | v11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = x1 & mask2
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = x3 & mask2
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x1 & mask3
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm10
# qhasm: x1 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = x4 & mask2
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = x6 & mask2
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x4 & mask3
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm6
# qhasm: x4 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: x6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
psllq $2,%xmm15
# qhasm: v01 = x5 & mask3
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm7
# qhasm: x5 = v00 | v10
vpor %xmm15,%xmm8,%xmm8
# qhasm: x7 = v01 | v11
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = x0 & mask4
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = x1 & mask4
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x0 & mask5
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = x1 & mask5
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm13
# qhasm: x0 = v00 | v10
vpor %xmm15,%xmm9,%xmm9
# qhasm: x1 = v01 | v11
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = x2 & mask4
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = x3 & mask4
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x2 & mask5
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm11
# qhasm: x2 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = x4 & mask4
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = x5 & mask4
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x4 & mask5
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = x5 & mask5
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm12
# qhasm: x4 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: x5 = v01 | v11
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = x6 & mask4
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = x7 & mask4
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
psllq $1,%xmm15
# qhasm: v01 = x6 & mask5
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
psrlq $1,%xmm6
# qhasm: x6 = v00 | v10
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
vpor %xmm7,%xmm6,%xmm6
# qhasm: mem128[ input_0 + 768 ] = x0
movdqu %xmm9,768(%rdi)
# qhasm: mem128[ input_0 + 784 ] = x1
movdqu %xmm13,784(%rdi)
# qhasm: mem128[ input_0 + 800 ] = x2
movdqu %xmm14,800(%rdi)
# qhasm: mem128[ input_0 + 816 ] = x3
movdqu %xmm10,816(%rdi)
# qhasm: mem128[ input_0 + 832 ] = x4
movdqu %xmm11,832(%rdi)
# qhasm: mem128[ input_0 + 848 ] = x5
movdqu %xmm8,848(%rdi)
# qhasm: mem128[ input_0 + 864 ] = x6
movdqu %xmm12,864(%rdi)
# qhasm: mem128[ input_0 + 880 ] = x7
movdqu %xmm6,880(%rdi)
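
# Last block in this stretch, at offset 896.  From here on the mask
# registers are recycled as scratch once their final use is past: in
# the (x3, x7) exchange below, v10 and v11 land in xmm0 and xmm1,
# which until now held mask0 and mask1.
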
# qhasm: x0 = mem128[ input_0 + 896 ]
movdqu 896(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 912 ]
movdqu 912(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 928 ]
movdqu 928(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 944 ]
movdqu 944(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 960 ]
movdqu 960(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 976 ]
movdqu 976(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 992 ]
movdqu 992(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 1008 ]
movdqu 1008(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = x4 & mask0
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x0 & mask1
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm6
# qhasm: x0 = v00 | v10
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = x5 & mask0
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x1 & mask1
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm7
# qhasm: x1 = v00 | v10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = x6 & mask0
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
psllq $4,%xmm15
# qhasm: v01 = x2 & mask1
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm8
# qhasm: x2 = v00 | v10
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = x7 & mask0
vpand %xmm0,%xmm13,%xmm0
# qhasm: 2x v10 <<= 4
psllq $4,%xmm0
# qhasm: v01 = x3 & mask1
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
vpand %xmm1,%xmm13,%xmm1
# qhasm: 2x v01 unsigned>>= 4
psrlq $4,%xmm9
# qhasm: x3 = v00 | v10
vpor %xmm0,%xmm12,%xmm0
# qhasm: x7 = v01 | v11
vpor %xmm1,%xmm9,%xmm1
# qhasm: v00 = x0 & mask2
vpand %xmm2,%xmm14,%xmm9
# qhasm: v10 = x2 & mask2
vpand %xmm2,%xmm11,%xmm12
# qhasm: 2x v10 <<= 2
psllq $2,%xmm12
# qhasm: v01 = x0 & mask3
vpand %xmm3,%xmm14,%xmm13
# qhasm: v11 = x2 & mask3
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm13
# qhasm: x0 = v00 | v10
vpor %xmm12,%xmm9,%xmm9
# qhasm: x2 = v01 | v11
vpor %xmm11,%xmm13,%xmm11
# qhasm: v00 = x1 & mask2
vpand %xmm2,%xmm10,%xmm12
# qhasm: v10 = x3 & mask2
vpand %xmm2,%xmm0,%xmm13
# qhasm: 2x v10 <<= 2
psllq $2,%xmm13
# qhasm: v01 = x1 & mask3
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
vpand %xmm3,%xmm0,%xmm0
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm10
# qhasm: x1 = v00 | v10
vpor %xmm13,%xmm12,%xmm12
# qhasm: x3 = v01 | v11
vpor %xmm0,%xmm10,%xmm0
# qhasm: v00 = x4 & mask2
vpand %xmm2,%xmm6,%xmm10
# qhasm: v10 = x6 & mask2
vpand %xmm2,%xmm8,%xmm13
# qhasm: 2x v10 <<= 2
psllq $2,%xmm13
# qhasm: v01 = x4 & mask3
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
psrlq $2,%xmm6
# qhasm: x4 = v00 | v10
vpor %xmm13,%xmm10,%xmm10
# qhasm: x6 = v01 | v11
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = x7 & mask2
vpand %xmm2,%xmm1,%xmm2
# qhasm: 2x v10 <<= 2
psllq $2,%xmm2
# qhasm: v01 = x5 & mask3
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
vpand %xmm3,%xmm1,%
xmm1 # qhasm: 2x v01 unsigned>>= 2 # asm 1: psrlq $2,<v01=reg128#8 # asm 2: psrlq $2,<v01=%xmm7 psrlq $2, % xmm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v10=reg128#3,<v00=reg128#9,>x5=reg128#3 # asm 2: vpor <v10=%xmm2,<v00=%xmm8,>x5=%xmm2 vpor % xmm2, % xmm8, % xmm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1 vpor % xmm1, % xmm7, % xmm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4 # asm 2: vpand <mask4=%xmm4,<x0=%xmm9,>v00=%xmm3 vpand % xmm4, % xmm9, % xmm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <mask4=reg128#5,<x1=reg128#13,>v10=reg128#8 # asm 2: vpand <mask4=%xmm4,<x1=%xmm12,>v10=%xmm7 vpand % xmm4, % xmm12, % xmm7 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#8 # asm 2: psllq $1,<v10=%xmm7 psllq $1, % xmm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <mask5=reg128#6,<x0=reg128#10,>v01=reg128#9 # asm 2: vpand <mask5=%xmm5,<x0=%xmm9,>v01=%xmm8 vpand % xmm5, % xmm9, % xmm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10 # asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9 vpand % xmm5, % xmm12, % xmm9 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#9 # asm 2: psrlq $1,<v01=%xmm8 psrlq $1, % xmm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4 # asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3 vpor % xmm7, % xmm3, % xmm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8 # asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7 vpor % xmm9, % xmm8, % xmm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9 # asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8 vpand % xmm4, % xmm11, % xmm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <mask4=reg128#5,<x3=reg128#1,>v10=reg128#10 # asm 2: vpand <mask4=%xmm4,<x3=%xmm0,>v10=%xmm9 vpand % xmm4, % xmm0, % xmm9 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#10 # asm 2: psllq $1,<v10=%xmm9 psllq $1, % xmm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12 # asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11 vpand % xmm5, % xmm11, % xmm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1 # asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0 vpand % xmm5, % xmm0, % xmm0 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#12 # asm 2: psrlq $1,<v01=%xmm11 psrlq $1, % xmm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9 # asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8 vpor % xmm9, % xmm8, % xmm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1 # asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0 vpor % xmm0, % xmm11, % xmm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10 # asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9 vpand % xmm4, % xmm10, % xmm9 # qhasm: v10 = x5 & mask4 # asm 1: vpand <mask4=reg128#5,<x5=reg128#3,>v10=reg128#12 # asm 2: vpand <mask4=%xmm4,<x5=%xmm2,>v10=%xmm11 vpand % xmm4, % xmm2, % xmm11 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#12 # asm 2: psllq $1,<v10=%xmm11 psllq $1, % xmm11 # qhasm: v01 = x4 & mask5 # asm 1: vpand <mask5=reg128#6,<x4=reg128#11,>v01=reg128#11 # asm 2: vpand <mask5=%xmm5,<x4=%xmm10,>v01=%xmm10 vpand % xmm5, % xmm10, % xmm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3 # asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2 vpand % xmm5, % xmm2, % xmm2 # qhasm: 2x v01 
unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#11 # asm 2: psrlq $1,<v01=%xmm10 psrlq $1, % xmm10 # qhasm: x4 = v00 | v10 # asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10 # asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9 vpor % xmm11, % xmm9, % xmm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3 # asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2 vpor % xmm2, % xmm10, % xmm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#11 # asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm10 vpand % xmm4, % xmm6, % xmm10 # qhasm: v10 = x7 & mask4 # asm 1: vpand <mask4=reg128#5,<x7=reg128#2,>v10=reg128#5 # asm 2: vpand <mask4=%xmm4,<x7=%xmm1,>v10=%xmm4 vpand % xmm4, % xmm1, % xmm4 # qhasm: 2x v10 <<= 1 # asm 1: psllq $1,<v10=reg128#5 # asm 2: psllq $1,<v10=%xmm4 psllq $1, % xmm4 # qhasm: v01 = x6 & mask5 # asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7 # asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6 vpand % xmm5, % xmm6, % xmm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2 # asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1 vpand % xmm5, % xmm1, % xmm1 # qhasm: 2x v01 unsigned>>= 1 # asm 1: psrlq $1,<v01=reg128#7 # asm 2: psrlq $1,<v01=%xmm6 psrlq $1, % xmm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v10=reg128#5,<v00=reg128#11,>x6=reg128#5 # asm 2: vpor <v10=%xmm4,<v00=%xmm10,>x6=%xmm4 vpor % xmm4, % xmm10, % xmm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2 # asm 2: vpor <v11=%xmm1,<v01=%xmm6,>x7=%xmm1 vpor % xmm1, % xmm6, % xmm1 # qhasm: mem128[ input_0 + 896 ] = x0 # asm 1: movdqu <x0=reg128#4,896(<input_0=int64#1) # asm 2: movdqu <x0=%xmm3,896(<input_0=%rdi) movdqu % xmm3, 896( % rdi) # qhasm: mem128[ input_0 + 912 ] = x1 # asm 1: movdqu <x1=reg128#8,912(<input_0=int64#1) # asm 2: movdqu <x1=%xmm7,912(<input_0=%rdi) movdqu % xmm7, 912( % rdi) # qhasm: mem128[ input_0 + 928 ] = x2 # asm 1: movdqu <x2=reg128#9,928(<input_0=int64#1) # asm 2: movdqu <x2=%xmm8,928(<input_0=%rdi) movdqu % xmm8, 928( % rdi) # qhasm: mem128[ input_0 + 944 ] = x3 # asm 1: movdqu <x3=reg128#1,944(<input_0=int64#1) # asm 2: movdqu <x3=%xmm0,944(<input_0=%rdi) movdqu % xmm0, 944( % rdi) # qhasm: mem128[ input_0 + 960 ] = x4 # asm 1: movdqu <x4=reg128#10,960(<input_0=int64#1) # asm 2: movdqu <x4=%xmm9,960(<input_0=%rdi) movdqu % xmm9, 960( % rdi) # qhasm: mem128[ input_0 + 976 ] = x5 # asm 1: movdqu <x5=reg128#3,976(<input_0=int64#1) # asm 2: movdqu <x5=%xmm2,976(<input_0=%rdi) movdqu % xmm2, 976( % rdi) # qhasm: mem128[ input_0 + 992 ] = x6 # asm 1: movdqu <x6=reg128#5,992(<input_0=int64#1) # asm 2: movdqu <x6=%xmm4,992(<input_0=%rdi) movdqu % xmm4, 992( % rdi) # qhasm: mem128[ input_0 + 1008 ] = x7 # asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1) # asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi) movdqu % xmm1, 1008( % rdi) # qhasm: return add % r11, % rsp ret
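
The listing above is the tail of a qhasm-generated bit-interleaving routine: each round pairs two registers, keeps the bit groups selected by one mask, and trades the complementary groups across the pair with a left/right shift of 4, 2, then 1 bit. A minimal C sketch of one such exchange stage, using 64-bit words in place of the 128-bit XMM lanes (the function name, driver loop, and test values are illustrative, not part of the crate):

#include <stdint.h>
#include <stdio.h>

/* One exchange stage: `mask` selects the bit groups that stay in place;
 * the complementary groups are traded between x and y across a shift of
 * s bits -- the v00/v10/v01/v11 pattern in the assembly above. */
static void stage(uint64_t *x, uint64_t *y, uint64_t mask, int s) {
    uint64_t v00 = *x & mask;           /* groups of x that stay        */
    uint64_t v10 = (*y & mask) << s;    /* low groups of y, moved up    */
    uint64_t v01 = (*x & ~mask) >> s;   /* high groups of x, moved down */
    uint64_t v11 = *y & ~mask;          /* groups of y that stay        */
    *x = v00 | v10;                     /* x = v00 | v10 */
    *y = v01 | v11;                     /* y = v01 | v11 */
}

int main(void) {
    uint64_t x[8] = {0x0123456789abcdefULL, 0xfedcba9876543210ULL,
                     0x0f0f0f0f0f0f0f0fULL, 0xf0f0f0f0f0f0f0f0ULL,
                     0x5555555555555555ULL, 0xaaaaaaaaaaaaaaaaULL,
                     0x00ff00ff00ff00ffULL, 0xff00ff00ff00ff00ULL};
    /* Same schedule as the rounds above: stride and shift 4, 2, 1 with
     * the matching alternating-group mask for each width. */
    static const uint64_t masks[3] = {0x0f0f0f0f0f0f0f0fULL,
                                      0x3333333333333333ULL,
                                      0x5555555555555555ULL};
    static const int shifts[3] = {4, 2, 1};
    for (int k = 0; k < 3; k++)
        for (int i = 0; i < 8; i++)
            if ((i & shifts[k]) == 0)
                stage(&x[i], &x[i + shifts[k]], masks[k], shifts[k]);
    for (int i = 0; i < 8; i++)
        printf("x%d = %016llx\n", i, (unsigned long long)x[i]);
    return 0;
}

Together with the larger-stride rounds that precede this excerpt, this is the standard mask-and-shift bit-matrix interleave; the assembly simply unrolls it over eight XMM registers per 128-byte block.
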
mktmansour/MKT-KSA-Geolocation-Security
69,549
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128f/avx2/vec256_mul_asm.S
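
The listing below computes a bitsliced product in GF(2^13): thirteen 256-bit registers a0..a12 and b-operands hold bit i of 256 field elements in parallel, each r_k accumulates a_i & b_j over i + j = k, and the tail folds r24..r13 back down (the pattern r15 ^= r24, r14 ^= r24, r12 ^= r24, r11 = r24 visible below corresponds to the reduction polynomial x^13 + x^4 + x^3 + x + 1). A plain-C model of the same computation on 64-bit lanes, as an illustrative sketch rather than the crate's code:

#include <stdint.h>
#include <stdio.h>

/* Bitsliced GF(2^13) multiply: f[i], g[i], h[i] carry bit i of many
 * field elements at once, one element per bit lane of the word. */
static void vec_mul_model(uint64_t h[13],
                          const uint64_t f[13], const uint64_t g[13]) {
    uint64_t buf[25] = {0};
    /* carry-less schoolbook product: buf[i+j] ^= f[i] & g[j]
     * (the vpand/vpxor ladder in the assembly below) */
    for (int i = 0; i < 13; i++)
        for (int j = 0; j < 13; j++)
            buf[i + j] ^= f[i] & g[j];
    /* fold degrees 24..13 using x^13 = x^4 + x^3 + x + 1 */
    for (int k = 24; k >= 13; k--) {
        buf[k - 9]  ^= buf[k];   /* x^4 term */
        buf[k - 10] ^= buf[k];   /* x^3 term */
        buf[k - 12] ^= buf[k];   /* x^1 term */
        buf[k - 13] ^= buf[k];   /* x^0 term */
    }
    for (int k = 0; k < 13; k++)
        h[k] = buf[k];
}

int main(void) {
    uint64_t f[13] = {0}, g[13] = {0}, h[13];
    f[1]  = ~0ULL;  /* every lane holds the element x    */
    g[12] = ~0ULL;  /* every lane holds the element x^12 */
    vec_mul_model(h, f, g);
    /* x * x^12 = x^13 = x^4 + x^3 + x + 1, so h[0], h[1], h[3], h[4]
     * come back all-ones and the rest all-zero */
    for (int k = 0; k < 13; k++)
        printf("h[%2d] = %016llx\n", k, (unsigned long long)h[k]);
    return 0;
}

For k = 24 the folds hit buf[15], buf[14], buf[12], buf[11], matching the r15/r14/r12/r11 updates in the assembly; the YMM version merely widens each lane set from 64 to 256 elements.
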
#include "namespace.h" #define vec256_mul_asm CRYPTO_NAMESPACE(vec256_mul_asm) #define _vec256_mul_asm _CRYPTO_NAMESPACE(vec256_mul_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 a0 # qhasm: reg256 a1 # qhasm: reg256 a2 # qhasm: reg256 a3 # qhasm: reg256 a4 # qhasm: reg256 a5 # qhasm: reg256 a6 # qhasm: reg256 a7 # qhasm: reg256 a8 # qhasm: reg256 a9 # qhasm: reg256 a10 # qhasm: reg256 a11 # qhasm: reg256 a12 # qhasm: reg256 b0 # qhasm: reg256 b1 # qhasm: reg256 r0 # qhasm: reg256 r1 # qhasm: reg256 r2 # qhasm: reg256 r3 # qhasm: reg256 r4 # qhasm: reg256 r5 # qhasm: reg256 r6 # qhasm: reg256 r7 # qhasm: reg256 r8 # qhasm: reg256 r9 # qhasm: reg256 r10 # qhasm: reg256 r11 # qhasm: reg256 r12 # qhasm: reg256 r13 # qhasm: reg256 r14 # qhasm: reg256 r15 # qhasm: reg256 r16 # qhasm: reg256 r17 # qhasm: reg256 r18 # qhasm: reg256 r19 # qhasm: reg256 r20 # qhasm: reg256 r21 # qhasm: reg256 r22 # qhasm: reg256 r23 # qhasm: reg256 r24 # qhasm: reg256 r # qhasm: enter vec256_mul_asm .p2align 5 .global _vec256_mul_asm .global vec256_mul_asm _vec256_mul_asm: vec256_mul_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: b0 = mem256[ input_2 + 0 ] # asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1 # asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0 vmovupd 0( % rdx), % ymm0 # qhasm: a12 = mem256[ input_1 + 384 ] # asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2 # asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1 vmovupd 384( % rsi), % ymm1 # qhasm: r12 = a12 & b0 # asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3 # asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2 vpand % ymm1, % ymm0, % ymm2 # qhasm: r13 = a12 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4 # asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3 vpand 32( % rdx), % ymm1, % ymm3 # qhasm: r14 = a12 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5 # asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4 vpand 64( % rdx), % ymm1, % ymm4 # qhasm: r15 = a12 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6 # asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5 vpand 96( % rdx), % ymm1, % ymm5 # qhasm: r16 = a12 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7 # asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6 vpand 128( % rdx), % ymm1, % ymm6 # qhasm: r17 = a12 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8 # asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7 vpand 160( % rdx), % ymm1, % ymm7 # qhasm: r18 = a12 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9 # asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8 vpand 192( % rdx), % ymm1, % ymm8 # qhasm: r19 = a12 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10 # asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9 vpand 224( % rdx), % ymm1, % ymm9 # qhasm: r20 = a12 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11 # asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10 vpand 256( % rdx), % ymm1, % ymm10 # qhasm: r21 
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#3,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm2,384(<input_0=%rdi) vmovupd % ymm2, 384( % rdi) # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#2,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm1,352(<input_0=%rdi) vmovupd % ymm1, 352( % rdi) # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: 
vmovupd <r9=reg256#13,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm12,288(<input_0=%rdi) vmovupd % ymm12, 288( % rdi) # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#12,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm11,256(<input_0=%rdi) vmovupd % ymm11, 256( % rdi) # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#11,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm10,224(<input_0=%rdi) vmovupd % ymm10, 224( % rdi) # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#10,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm9,192(<input_0=%rdi) vmovupd % ymm9, 192( % rdi) # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#9,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm8,160(<input_0=%rdi) vmovupd % ymm8, 160( % rdi) # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#8,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm7,128(<input_0=%rdi) vmovupd % ymm7, 128( % rdi) # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#7,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm6,96(<input_0=%rdi) vmovupd % ymm6, 96( % rdi) # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#6,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm5,64(<input_0=%rdi) vmovupd % ymm5, 64( % rdi) # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#5,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm4,32(<input_0=%rdi) vmovupd % ymm4, 32( % rdi) # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#4,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm3,0(<input_0=%rdi) vmovupd % ymm3, 0( % rdi) # qhasm: return add % r11, % rsp ret
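The routine ending above is the tail of a bitsliced multiplication: each 256-bit limb a_i of one operand is ANDed against all thirteen limbs of the other and XORed into accumulators r_i..r_{i+12}, and after each pass the topmost accumulator r_k (k = 16 down to 13) is folded back as r_k -> r_{k-9}, r_{k-10}, r_{k-12}, r_{k-13}. That fold is reduction modulo x^13 + x^4 + x^3 + x + 1, i.e. multiplication in GF(2^13), performed on 256 field elements at once, with each ymm register holding one coefficient bit-plane. A minimal scalar model of a single lane, under that assumed modulus (gf13_mul and its loop structure are illustrative, not taken from this source):

    #include <stdint.h>

    /* Scalar sketch of one lane of the AVX2 routine above: schoolbook
       multiply in GF(2)[x], then fold degrees 24..13 down through the
       assumed modulus x^13 = x^4 + x^3 + x + 1 (this matches the
       r16 -> r7,r6,r4,r3 ... r13 -> r4,r3,r1,r0 pattern in the code). */
    static uint16_t gf13_mul(uint16_t a, uint16_t b)
    {
        uint32_t r = 0;

        /* the vpand/vpxor ladder: one AND + XOR per coefficient bit of a */
        for (int i = 0; i < 13; i++)
            r ^= ((uint32_t)b << i) * ((a >> i) & 1u);

        /* the reduction tail: coefficient k feeds k-9, k-10, k-12, k-13 */
        for (int k = 24; k >= 13; k--) {
            uint32_t c = (r >> k) & 1u;
            r ^= (c << (k - 9)) | (c << (k - 10))
               | (c << (k - 12)) | (c << (k - 13));
            r &= (1u << k) - 1u;   /* drop the folded coefficient */
        }
        return (uint16_t)r;
    }

The assembly is these two loops fully unrolled and interleaved: the multiply loop becomes one vpand + vpxor per (a_i, b_j) limb pair, each fold step becomes the vpxor/vmovapd group after a limb pass, and every scalar bit position is widened to a 256-bit plane.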
mktmansour/MKT-KSA-Geolocation-Security
264,233
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128f/avx2/transpose_64x256_sp_asm.S
#include "namespace.h" #define MASK0_0 CRYPTO_NAMESPACE(MASK0_0) #define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0) #define MASK0_1 CRYPTO_NAMESPACE(MASK0_1) #define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1) #define MASK1_0 CRYPTO_NAMESPACE(MASK1_0) #define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0) #define MASK1_1 CRYPTO_NAMESPACE(MASK1_1) #define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1) #define MASK2_0 CRYPTO_NAMESPACE(MASK2_0) #define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0) #define MASK2_1 CRYPTO_NAMESPACE(MASK2_1) #define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1) #define MASK3_0 CRYPTO_NAMESPACE(MASK3_0) #define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0) #define MASK3_1 CRYPTO_NAMESPACE(MASK3_1) #define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1) #define MASK4_0 CRYPTO_NAMESPACE(MASK4_0) #define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0) #define MASK4_1 CRYPTO_NAMESPACE(MASK4_1) #define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1) #define MASK5_0 CRYPTO_NAMESPACE(MASK5_0) #define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0) #define MASK5_1 CRYPTO_NAMESPACE(MASK5_1) #define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1) #define transpose_64x256_sp_asm CRYPTO_NAMESPACE(transpose_64x256_sp_asm) #define _transpose_64x256_sp_asm _CRYPTO_NAMESPACE(transpose_64x256_sp_asm) # qhasm: int64 input_0 # qhasm: int64 input_1 # qhasm: int64 input_2 # qhasm: int64 input_3 # qhasm: int64 input_4 # qhasm: int64 input_5 # qhasm: stack64 input_6 # qhasm: stack64 input_7 # qhasm: int64 caller_r11 # qhasm: int64 caller_r12 # qhasm: int64 caller_r13 # qhasm: int64 caller_r14 # qhasm: int64 caller_r15 # qhasm: int64 caller_rbx # qhasm: int64 caller_rbp # qhasm: reg256 x0 # qhasm: reg256 x1 # qhasm: reg256 x2 # qhasm: reg256 x3 # qhasm: reg256 x4 # qhasm: reg256 x5 # qhasm: reg256 x6 # qhasm: reg256 x7 # qhasm: reg256 t0 # qhasm: reg256 t1 # qhasm: reg256 v00 # qhasm: reg256 v01 # qhasm: reg256 v10 # qhasm: reg256 v11 # qhasm: reg256 mask0 # qhasm: reg256 mask1 # qhasm: reg256 mask2 # qhasm: reg256 mask3 # qhasm: reg256 mask4 # qhasm: reg256 mask5 # qhasm: enter transpose_64x256_sp_asm .p2align 5 .global _transpose_64x256_sp_asm .global transpose_64x256_sp_asm _transpose_64x256_sp_asm: transpose_64x256_sp_asm: mov % rsp, % r11 and $31, % r11 add $0, % r11 sub % r11, % rsp # qhasm: mask0 aligned= mem256[ MASK5_0 ] # asm 1: vmovapd MASK5_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK5_0(%rip),>mask0=%ymm0 vmovapd MASK5_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK5_1 ] # asm 1: vmovapd MASK5_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK5_1(%rip),>mask1=%ymm1 vmovapd MASK5_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK4_0 ] # asm 1: vmovapd MASK4_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK4_0(%rip),>mask2=%ymm2 vmovapd MASK4_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK4_1 ] # asm 1: vmovapd MASK4_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK4_1(%rip),>mask3=%ymm3 vmovapd MASK4_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK3_0 ] # asm 1: vmovapd MASK3_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK3_0(%rip),>mask4=%ymm4 vmovapd MASK3_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK3_1 ] # asm 1: vmovapd MASK3_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK3_1(%rip),>mask5=%ymm5 vmovapd MASK3_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 256(<input_0=%rdi),>x1=%ymm7 vmovupd 256( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 512 ] 
# asm 1: vmovupd 512(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 512(<input_0=%rdi),>x2=%ymm8 vmovupd 512( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 768 ] # asm 1: vmovupd 768(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 768(<input_0=%rdi),>x3=%ymm9 vmovupd 768( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1024 ] # asm 1: vmovupd 1024(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1024(<input_0=%rdi),>x4=%ymm10 vmovupd 1024( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1280 ] # asm 1: vmovupd 1280(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1280(<input_0=%rdi),>x5=%ymm11 vmovupd 1280( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1536(<input_0=%rdi),>x6=%ymm12 vmovupd 1536( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1792(<input_0=%rdi),>x7=%ymm13 vmovupd 1792( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 
# qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 256 ] = x1 # asm 1: vmovupd <x1=reg256#14,256(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,256(<input_0=%rdi) vmovupd % ymm13, 256( % rdi) # qhasm: mem256[ input_0 + 512 ] = x2 # asm 1: vmovupd <x2=reg256#15,512(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,512(<input_0=%rdi) vmovupd % ymm14, 512( % rdi) # qhasm: mem256[ input_0 + 768 ] = x3 # asm 1: vmovupd <x3=reg256#11,768(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,768(<input_0=%rdi) vmovupd % ymm10, 768( % rdi) # qhasm: mem256[ input_0 + 1024 ] = x4 # asm 1: vmovupd <x4=reg256#12,1024(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1024(<input_0=%rdi) vmovupd % ymm11, 1024( % rdi) # qhasm: mem256[ input_0 + 1280 ] = x5 # asm 1: vmovupd <x5=reg256#9,1280(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1280(<input_0=%rdi) vmovupd % ymm8, 1280( % rdi) # qhasm: mem256[ input_0 + 1536 ] = x6 # asm 1: vmovupd <x6=reg256#13,1536(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1536(<input_0=%rdi) vmovupd % ymm12, 1536( % rdi) # qhasm: mem256[ input_0 + 1792 ] = x7 # asm 1: vmovupd <x7=reg256#7,1792(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1792(<input_0=%rdi) vmovupd % ymm6, 1792( % rdi) # qhasm: x0 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6 vmovupd 32( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 544 ] # asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8 vmovupd 544( % rdi), % ymm8 # qhasm: x3 = mem256[ 
input_0 + 800 ] # asm 1: vmovupd 800(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 800(<input_0=%rdi),>x3=%ymm9 vmovupd 800( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1056 ] # asm 1: vmovupd 1056(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1056(<input_0=%rdi),>x4=%ymm10 vmovupd 1056( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1312 ] # asm 1: vmovupd 1312(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1312(<input_0=%rdi),>x5=%ymm11 vmovupd 1312( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1568(<input_0=%rdi),>x6=%ymm12 vmovupd 1568( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1824(<input_0=%rdi),>x7=%ymm13 vmovupd 1824( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: 
vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 
2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x 
v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 32 ] = x0 # asm 1: vmovupd <x0=reg256#10,32(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,32(<input_0=%rdi) vmovupd % ymm9, 32( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 544 ] = x2 # asm 1: vmovupd <x2=reg256#15,544(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,544(<input_0=%rdi) vmovupd % ymm14, 544( % rdi) # qhasm: mem256[ input_0 + 800 ] = x3 # asm 1: vmovupd <x3=reg256#11,800(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,800(<input_0=%rdi) vmovupd % ymm10, 800( % rdi) # qhasm: mem256[ input_0 + 1056 ] = x4 # asm 1: vmovupd <x4=reg256#12,1056(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1056(<input_0=%rdi) vmovupd % ymm11, 1056( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x5 # asm 1: vmovupd <x5=reg256#9,1312(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1312(<input_0=%rdi) vmovupd % ymm8, 1312( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x6 # asm 1: vmovupd <x6=reg256#13,1568(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1568(<input_0=%rdi) vmovupd % ymm12, 1568( % rdi) # qhasm: mem256[ input_0 + 1824 ] = x7 # asm 1: vmovupd <x7=reg256#7,1824(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1824(<input_0=%rdi) vmovupd % ymm6, 1824( % rdi) # qhasm: x0 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 64(<input_0=%rdi),>x0=%ymm6 vmovupd 64( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 320 ] # asm 1: vmovupd 320(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 320(<input_0=%rdi),>x1=%ymm7 vmovupd 320( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 832 ] # asm 1: vmovupd 832(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 832(<input_0=%rdi),>x3=%ymm9 vmovupd 832( % rdi), % ymm9 # qhasm: 
x4 = mem256[ input_0 + 1088 ] # asm 1: vmovupd 1088(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1088(<input_0=%rdi),>x4=%ymm10 vmovupd 1088( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1344 ] # asm 1: vmovupd 1344(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1344(<input_0=%rdi),>x5=%ymm11 vmovupd 1344( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1600(<input_0=%rdi),>x6=%ymm12 vmovupd 1600( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1856(<input_0=%rdi),>x7=%ymm13 vmovupd 1856( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 
1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 
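# The six mask registers (mask0 .. mask5) are loaded by the function
# prologue, which is not part of this excerpt. Their roles can be read off
# the shift widths they are paired with: mask0/mask1 select the complementary
# 32-bit halves of each 64-bit lane (used with vpsllq/vpsrlq $32), mask2/mask3
# the complementary 16-bit halves of each 32-bit word (vpslld/vpsrld $16), and
# mask4/mask5 the complementary bytes of each 16-bit word (vpsllw/vpsrlw $8).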
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = 
x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 64 ] = x0 # asm 1: vmovupd <x0=reg256#10,64(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,64(<input_0=%rdi) vmovupd % ymm9, 64( % rdi) # qhasm: mem256[ input_0 + 320 ] = x1 # asm 1: vmovupd <x1=reg256#14,320(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,320(<input_0=%rdi) vmovupd % ymm13, 320( % rdi) # qhasm: mem256[ input_0 + 576 ] = x2 # asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi) vmovupd % ymm14, 576( % rdi) # qhasm: mem256[ input_0 + 832 ] = x3 # asm 1: vmovupd <x3=reg256#11,832(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,832(<input_0=%rdi) vmovupd % ymm10, 832( % rdi) # qhasm: mem256[ input_0 + 1088 ] = x4 # asm 1: vmovupd <x4=reg256#12,1088(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1088(<input_0=%rdi) vmovupd % ymm11, 1088( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x5 # asm 1: vmovupd <x5=reg256#9,1344(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1344(<input_0=%rdi) vmovupd % ymm8, 1344( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x6 # asm 1: vmovupd <x6=reg256#13,1600(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1600(<input_0=%rdi) vmovupd % ymm12, 1600( % rdi) # qhasm: mem256[ input_0 + 1856 ] = x7 # asm 1: vmovupd <x7=reg256#7,1856(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1856(<input_0=%rdi) vmovupd % ymm6, 1856( % rdi) # qhasm: x0 = mem256[ input_0 + 96 ] # asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6 vmovupd 96( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7 vmovupd 352( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8 vmovupd 608( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 864 ] # asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9 vmovupd 864( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1120 ] # asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10 vmovupd 1120( % rdi), % 
ymm10 # qhasm: x5 = mem256[ input_0 + 1376 ] # asm 1: vmovupd 1376(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1376(<input_0=%rdi),>x5=%ymm11 vmovupd 1376( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1632 ] # asm 1: vmovupd 1632(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1632(<input_0=%rdi),>x6=%ymm12 vmovupd 1632( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1888(<input_0=%rdi),>x7=%ymm13 vmovupd 1888( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 
<< 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor 
<v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 
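# Each group in this file loads eight 256-bit vectors x0..x7 spaced 256
# bytes apart, applies three rounds of masked interleaving, and stores them
# back to the same offsets. Round one delta-swaps the pairs (x0,x4) (x1,x5)
# (x2,x6) (x3,x7) at 32-bit width, round two the pairs (x0,x2) (x1,x3)
# (x4,x6) (x5,x7) at 16-bit width, and round three the pairs (x0,x1) (x2,x3)
# (x4,x5) (x6,x7) at 8-bit width; together the three rounds transpose the
# 8x8 byte matrix held across x0..x7 within each 64-bit lane. A C sketch of
# one swap step, assuming lo_mask is the low-half mask and w the half width
# in bits (names hypothetical):
#
#     uint64_t a = x[i], b = x[j];
#     x[i] = (a & lo_mask) | (b << w);    /* keep low half of a, pull in low half of b */
#     x[j] = (a >> w) | (b & ~lo_mask);   /* high half of a, keep high half of b       */
#
# The vpand/vpsllq/vpsrlq/vpor sequences perform this step on four 64-bit
# lanes at once; the vpslld/vpsrld $16 and vpsllw/vpsrlw $8 variants repeat
# it at the narrower widths.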
vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 96 ] = x0 # asm 1: vmovupd <x0=reg256#10,96(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,96(<input_0=%rdi) vmovupd % ymm9, 96( % rdi) # qhasm: mem256[ input_0 + 352 ] = x1 # asm 1: vmovupd <x1=reg256#14,352(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,352(<input_0=%rdi) vmovupd % ymm13, 352( % rdi) # qhasm: mem256[ input_0 + 608 ] = x2 # asm 1: vmovupd <x2=reg256#15,608(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,608(<input_0=%rdi) vmovupd % ymm14, 608( % rdi) # qhasm: mem256[ input_0 + 864 ] = x3 # asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi) vmovupd % ymm10, 864( % rdi) # qhasm: mem256[ input_0 + 1120 ] = x4 # asm 1: vmovupd <x4=reg256#12,1120(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1120(<input_0=%rdi) vmovupd % ymm11, 1120( % rdi) # qhasm: mem256[ input_0 + 1376 ] = x5 # asm 1: vmovupd <x5=reg256#9,1376(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1376(<input_0=%rdi) vmovupd % ymm8, 1376( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x6 # asm 1: vmovupd <x6=reg256#13,1632(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1632(<input_0=%rdi) vmovupd % ymm12, 1632( % rdi) # qhasm: mem256[ input_0 + 1888 ] = x7 # asm 1: vmovupd <x7=reg256#7,1888(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1888(<input_0=%rdi) vmovupd % ymm6, 1888( % rdi) # qhasm: x0 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 128(<input_0=%rdi),>x0=%ymm6 vmovupd 128( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 384(<input_0=%rdi),>x1=%ymm7 vmovupd 384( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 640 ] # asm 1: vmovupd 640(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 640(<input_0=%rdi),>x2=%ymm8 vmovupd 640( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 896 ] # asm 1: vmovupd 896(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 896(<input_0=%rdi),>x3=%ymm9 vmovupd 896( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1152 ] # asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10 vmovupd 1152( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1408 ] # asm 1: vmovupd 1408(<input_0=int64#1),>x5=reg256#12 # asm 2: 
vmovupd 1408(<input_0=%rdi),>x5=%ymm11 vmovupd 1408( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1664 ] # asm 1: vmovupd 1664(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1664(<input_0=%rdi),>x6=%ymm12 vmovupd 1664( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1920(<input_0=%rdi),>x7=%ymm13 vmovupd 1920( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, 
% ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 
1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor 
<v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 128 ] = x0 # asm 1: vmovupd <x0=reg256#10,128(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,128(<input_0=%rdi) vmovupd % ymm9, 128( % rdi) # qhasm: mem256[ input_0 + 384 ] = x1 # asm 1: vmovupd <x1=reg256#14,384(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,384(<input_0=%rdi) vmovupd % ymm13, 384( % rdi) # qhasm: mem256[ input_0 + 640 ] = x2 # asm 1: vmovupd <x2=reg256#15,640(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,640(<input_0=%rdi) vmovupd % ymm14, 640( % rdi) # qhasm: mem256[ input_0 + 896 ] = x3 # asm 1: vmovupd <x3=reg256#11,896(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,896(<input_0=%rdi) vmovupd % ymm10, 896( % rdi) # qhasm: mem256[ input_0 + 1152 ] = x4 # asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi) vmovupd % ymm11, 1152( % rdi) # qhasm: mem256[ input_0 + 1408 ] = x5 # asm 1: vmovupd <x5=reg256#9,1408(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1408(<input_0=%rdi) vmovupd % ymm8, 1408( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x6 # asm 1: vmovupd <x6=reg256#13,1664(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1664(<input_0=%rdi) vmovupd % ymm12, 1664( % rdi) # qhasm: mem256[ input_0 + 1920 ] = x7 # asm 1: vmovupd <x7=reg256#7,1920(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1920(<input_0=%rdi) vmovupd % ymm6, 1920( % rdi) # qhasm: x0 = mem256[ input_0 + 160 ] # asm 1: vmovupd 160(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 160(<input_0=%rdi),>x0=%ymm6 vmovupd 160( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 416 ] # asm 1: vmovupd 416(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 416(<input_0=%rdi),>x1=%ymm7 vmovupd 416( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 672 ] # asm 1: vmovupd 672(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 672(<input_0=%rdi),>x2=%ymm8 vmovupd 672( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 928 ] # asm 1: vmovupd 928(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 928(<input_0=%rdi),>x3=%ymm9 vmovupd 928( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1184 ] # asm 1: vmovupd 1184(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1184(<input_0=%rdi),>x4=%ymm10 vmovupd 1184( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1440 ] # asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11 vmovupd 1440( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1696 ] # asm 1: vmovupd 
1696(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1696(<input_0=%rdi),>x6=%ymm12 vmovupd 1696( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1952(<input_0=%rdi),>x7=%ymm13 vmovupd 1952( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq 
$32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % 
ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor 
<v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 160 ] = x0 # asm 1: vmovupd <x0=reg256#10,160(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,160(<input_0=%rdi) vmovupd % ymm9, 160( % rdi) # qhasm: mem256[ input_0 + 416 ] = x1 # asm 1: vmovupd <x1=reg256#14,416(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,416(<input_0=%rdi) vmovupd % ymm13, 416( % rdi) # qhasm: mem256[ input_0 + 672 ] = x2 # asm 1: vmovupd <x2=reg256#15,672(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,672(<input_0=%rdi) vmovupd % ymm14, 672( % rdi) # qhasm: mem256[ input_0 + 928 ] = x3 # asm 1: vmovupd <x3=reg256#11,928(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,928(<input_0=%rdi) vmovupd % ymm10, 928( % rdi) # qhasm: mem256[ input_0 + 1184 ] = x4 # asm 1: vmovupd <x4=reg256#12,1184(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1184(<input_0=%rdi) vmovupd % ymm11, 1184( % rdi) # qhasm: mem256[ input_0 + 1440 ] = x5 # asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi) vmovupd % ymm8, 1440( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x6 # asm 1: vmovupd <x6=reg256#13,1696(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1696(<input_0=%rdi) vmovupd % ymm12, 1696( % rdi) # qhasm: mem256[ input_0 + 1952 ] = x7 # asm 1: vmovupd <x7=reg256#7,1952(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1952(<input_0=%rdi) vmovupd % ymm6, 1952( % rdi) # qhasm: x0 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 192(<input_0=%rdi),>x0=%ymm6 vmovupd 192( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 448 ] # asm 1: vmovupd 448(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 448(<input_0=%rdi),>x1=%ymm7 vmovupd 448( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 704 ] # asm 1: vmovupd 704(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 704(<input_0=%rdi),>x2=%ymm8 vmovupd 704( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 960 ] # asm 1: vmovupd 960(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 960(<input_0=%rdi),>x3=%ymm9 vmovupd 960( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1216 ] # asm 1: vmovupd 1216(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1216(<input_0=%rdi),>x4=%ymm10 vmovupd 1216( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1472 ] # asm 1: vmovupd 1472(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1472(<input_0=%rdi),>x5=%ymm11 vmovupd 1472( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1728 ] # asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12 vmovupd 1728( % rdi), 
% ymm12 # qhasm: x7 = mem256[ input_0 + 1984 ] # asm 1: vmovupd 1984(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1984(<input_0=%rdi),>x7=%ymm13 vmovupd 1984( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15 vpsllq $32, % ymm13, % ymm15 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand 
<x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15 vpslld $16, % ymm11, % ymm15 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14 vpsrld $16, % ymm14, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16 # asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15 vpslld $16, % ymm12, % ymm15 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15 vpslld $16, % ymm8, % ymm15 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16 # 
asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15 vpslld $16, % ymm9, % ymm15 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16 # asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15 vpsllw $8, % ymm14, % ymm15 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14 # asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13 vpsrlw $8, % ymm13, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16 # asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15 vpsllw $8, % ymm10, % ymm15 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16 # asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15 vpsllw $8, % ymm8, % ymm15 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13 # asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12 vpsrlw $8, % ymm12, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: 
v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16 # asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15 vpsllw $8, % ymm7, % ymm15 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 192 ] = x0 # asm 1: vmovupd <x0=reg256#10,192(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,192(<input_0=%rdi) vmovupd % ymm9, 192( % rdi) # qhasm: mem256[ input_0 + 448 ] = x1 # asm 1: vmovupd <x1=reg256#14,448(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,448(<input_0=%rdi) vmovupd % ymm13, 448( % rdi) # qhasm: mem256[ input_0 + 704 ] = x2 # asm 1: vmovupd <x2=reg256#15,704(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,704(<input_0=%rdi) vmovupd % ymm14, 704( % rdi) # qhasm: mem256[ input_0 + 960 ] = x3 # asm 1: vmovupd <x3=reg256#11,960(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,960(<input_0=%rdi) vmovupd % ymm10, 960( % rdi) # qhasm: mem256[ input_0 + 1216 ] = x4 # asm 1: vmovupd <x4=reg256#12,1216(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1216(<input_0=%rdi) vmovupd % ymm11, 1216( % rdi) # qhasm: mem256[ input_0 + 1472 ] = x5 # asm 1: vmovupd <x5=reg256#9,1472(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1472(<input_0=%rdi) vmovupd % ymm8, 1472( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6 # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi) vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1984 ] = x7 # asm 1: vmovupd <x7=reg256#7,1984(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1984(<input_0=%rdi) vmovupd % ymm6, 1984( % rdi) # qhasm: x0 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 224(<input_0=%rdi),>x0=%ymm6 vmovupd 224( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 480 ] # asm 1: vmovupd 480(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 480(<input_0=%rdi),>x1=%ymm7 vmovupd 480( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 736 ] # asm 1: vmovupd 736(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 736(<input_0=%rdi),>x2=%ymm8 vmovupd 736( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 992 ] # asm 1: vmovupd 992(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 992(<input_0=%rdi),>x3=%ymm9 vmovupd 992( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1248 ] # asm 1: vmovupd 1248(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1248(<input_0=%rdi),>x4=%ymm10 vmovupd 1248( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1504 ] # asm 1: vmovupd 1504(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1504(<input_0=%rdi),>x5=%ymm11 vmovupd 1504( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1760 ] # asm 1: vmovupd 1760(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1760(<input_0=%rdi),>x6=%ymm12 vmovupd 1760( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 2016 ] # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 
2016(<input_0=%rdi),>x7=%ymm13 vmovupd 2016( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: 4x v10 = x4 << 32 # asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16 # asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15 vpsllq $32, % ymm10, % ymm15 # qhasm: 4x v01 = x0 unsigned>> 32 # asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7 # asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6 vpsrlq $32, % ymm6, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: 4x v10 = x5 << 32 # asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16 # asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15 vpsllq $32, % ymm11, % ymm15 # qhasm: 4x v01 = x1 unsigned>> 32 # asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8 # asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7 vpsrlq $32, % ymm7, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: 4x v10 = x6 << 32 # asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16 # asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15 vpsllq $32, % ymm12, % ymm15 # qhasm: 4x v01 = x2 unsigned>> 32 # asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9 # asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8 vpsrlq $32, % ymm8, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#1 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm0 vpand % ymm9, % ymm0, % ymm0 # qhasm: 4x v10 = x7 << 32 # asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#13 # asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm12 vpsllq $32, % ymm13, % ymm12 # qhasm: 4x v01 = x3 unsigned>> 32 # asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10 # asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9 vpsrlq $32, % ymm9, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: x3 = v00 
| v10 # asm 1: vpor <v00=reg256#1,<v10=reg256#13,>x3=reg256#1 # asm 2: vpor <v00=%ymm0,<v10=%ymm12,>x3=%ymm0 vpor % ymm0, % ymm12, % ymm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1 vpor % ymm9, % ymm1, % ymm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9 vpand % ymm14, % ymm2, % ymm9 # qhasm: 8x v10 = x2 << 16 # asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#13 # asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm12 vpslld $16, % ymm11, % ymm12 # qhasm: 8x v01 = x0 unsigned>> 16 # asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#14 # asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm13 vpsrld $16, % ymm14, % ymm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9 vpor % ymm9, % ymm12, % ymm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11 vpor % ymm13, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12 vpand % ymm10, % ymm2, % ymm12 # qhasm: 8x v10 = x3 << 16 # asm 1: vpslld $16,<x3=reg256#1,>v10=reg256#14 # asm 2: vpslld $16,<x3=%ymm0,>v10=%ymm13 vpslld $16, % ymm0, % ymm13 # qhasm: 8x v01 = x1 unsigned>> 16 # asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11 # asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10 vpsrld $16, % ymm10, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12 vpor % ymm12, % ymm13, % ymm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0 vpor % ymm10, % ymm0, % ymm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10 vpand % ymm6, % ymm2, % ymm10 # qhasm: 8x v10 = x6 << 16 # asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#14 # asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm13 vpslld $16, % ymm8, % ymm13 # qhasm: 8x v01 = x4 unsigned>> 16 # asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7 # asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6 vpsrld $16, % ymm6, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10 vpor % ymm10, % ymm13, % ymm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#3 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm2 vpand % ymm7, % ymm2, % ymm2 # qhasm: 8x v10 = x7 << 16 # asm 1: vpslld $16,<x7=reg256#2,>v10=reg256#9 # asm 2: vpslld $16,<x7=%ymm1,>v10=%ymm8 vpslld $16, % ymm1, % ymm8 # qhasm: 8x v01 = x5 unsigned>> 16 # asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8 # asm 2: vpsrld 
$16,<x5=%ymm7,>v01=%ymm7 vpsrld $16, % ymm7, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#3,<v10=reg256#9,>x5=reg256#3 # asm 2: vpor <v00=%ymm2,<v10=%ymm8,>x5=%ymm2 vpor % ymm2, % ymm8, % ymm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1 vpor % ymm7, % ymm1, % ymm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4 # asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3 vpand % ymm9, % ymm4, % ymm3 # qhasm: 16x v10 = x1 << 8 # asm 1: vpsllw $8,<x1=reg256#13,>v10=reg256#8 # asm 2: vpsllw $8,<x1=%ymm12,>v10=%ymm7 vpsllw $8, % ymm12, % ymm7 # qhasm: 16x v01 = x0 unsigned>> 8 # asm 1: vpsrlw $8,<x0=reg256#10,>v01=reg256#9 # asm 2: vpsrlw $8,<x0=%ymm9,>v01=%ymm8 vpsrlw $8, % ymm9, % ymm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10 # asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9 vpand % ymm12, % ymm5, % ymm9 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4 # asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3 vpor % ymm3, % ymm7, % ymm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8 # asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7 vpor % ymm8, % ymm9, % ymm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8 vpand % ymm11, % ymm4, % ymm8 # qhasm: 16x v10 = x3 << 8 # asm 1: vpsllw $8,<x3=reg256#1,>v10=reg256#10 # asm 2: vpsllw $8,<x3=%ymm0,>v10=%ymm9 vpsllw $8, % ymm0, % ymm9 # qhasm: 16x v01 = x2 unsigned>> 8 # asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12 # asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11 vpsrlw $8, % ymm11, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0 vpand % ymm0, % ymm5, % ymm0 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8 vpor % ymm8, % ymm9, % ymm8 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0 vpor % ymm11, % ymm0, % ymm0 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9 vpand % ymm10, % ymm4, % ymm9 # qhasm: 16x v10 = x5 << 8 # asm 1: vpsllw $8,<x5=reg256#3,>v10=reg256#12 # asm 2: vpsllw $8,<x5=%ymm2,>v10=%ymm11 vpsllw $8, % ymm2, % ymm11 # qhasm: 16x v01 = x4 unsigned>> 8 # asm 1: vpsrlw $8,<x4=reg256#11,>v01=reg256#11 # asm 2: vpsrlw $8,<x4=%ymm10,>v01=%ymm10 vpsrlw $8, % ymm10, % ymm10 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3 # asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2 vpand % ymm2, % ymm5, % ymm2 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9 vpor % ymm9, % ymm11, % ymm9 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3 # asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2 vpor % ymm10, % ymm2, % ymm2 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#5 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm4 vpand % ymm6, % ymm4, % ymm4 # qhasm: 16x v10 = x7 << 8 # asm 1: vpsllw $8,<x7=reg256#2,>v10=reg256#11 # asm 
2: vpsllw $8,<x7=%ymm1,>v10=%ymm10 vpsllw $8, % ymm1, % ymm10 # qhasm: 16x v01 = x6 unsigned>> 8 # asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7 # asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6 vpsrlw $8, % ymm6, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1 vpand % ymm1, % ymm5, % ymm1 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#5,<v10=reg256#11,>x6=reg256#5 # asm 2: vpor <v00=%ymm4,<v10=%ymm10,>x6=%ymm4 vpor % ymm4, % ymm10, % ymm4 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1 vpor % ymm6, % ymm1, % ymm1 # qhasm: mem256[ input_0 + 224 ] = x0 # asm 1: vmovupd <x0=reg256#4,224(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm3,224(<input_0=%rdi) vmovupd % ymm3, 224( % rdi) # qhasm: mem256[ input_0 + 480 ] = x1 # asm 1: vmovupd <x1=reg256#8,480(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm7,480(<input_0=%rdi) vmovupd % ymm7, 480( % rdi) # qhasm: mem256[ input_0 + 736 ] = x2 # asm 1: vmovupd <x2=reg256#9,736(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm8,736(<input_0=%rdi) vmovupd % ymm8, 736( % rdi) # qhasm: mem256[ input_0 + 992 ] = x3 # asm 1: vmovupd <x3=reg256#1,992(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm0,992(<input_0=%rdi) vmovupd % ymm0, 992( % rdi) # qhasm: mem256[ input_0 + 1248 ] = x4 # asm 1: vmovupd <x4=reg256#10,1248(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm9,1248(<input_0=%rdi) vmovupd % ymm9, 1248( % rdi) # qhasm: mem256[ input_0 + 1504 ] = x5 # asm 1: vmovupd <x5=reg256#3,1504(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm2,1504(<input_0=%rdi) vmovupd % ymm2, 1504( % rdi) # qhasm: mem256[ input_0 + 1760 ] = x6 # asm 1: vmovupd <x6=reg256#5,1760(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm4,1760(<input_0=%rdi) vmovupd % ymm4, 1760( % rdi) # qhasm: mem256[ input_0 + 2016 ] = x7 # asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi) vmovupd % ymm1, 2016( % rdi) # qhasm: mask0 aligned= mem256[ MASK2_0 ] # asm 1: vmovapd MASK2_0(%rip),>mask0=reg256#1 # asm 2: vmovapd MASK2_0(%rip),>mask0=%ymm0 vmovapd MASK2_0( % rip), % ymm0 # qhasm: mask1 aligned= mem256[ MASK2_1 ] # asm 1: vmovapd MASK2_1(%rip),>mask1=reg256#2 # asm 2: vmovapd MASK2_1(%rip),>mask1=%ymm1 vmovapd MASK2_1( % rip), % ymm1 # qhasm: mask2 aligned= mem256[ MASK1_0 ] # asm 1: vmovapd MASK1_0(%rip),>mask2=reg256#3 # asm 2: vmovapd MASK1_0(%rip),>mask2=%ymm2 vmovapd MASK1_0( % rip), % ymm2 # qhasm: mask3 aligned= mem256[ MASK1_1 ] # asm 1: vmovapd MASK1_1(%rip),>mask3=reg256#4 # asm 2: vmovapd MASK1_1(%rip),>mask3=%ymm3 vmovapd MASK1_1( % rip), % ymm3 # qhasm: mask4 aligned= mem256[ MASK0_0 ] # asm 1: vmovapd MASK0_0(%rip),>mask4=reg256#5 # asm 2: vmovapd MASK0_0(%rip),>mask4=%ymm4 vmovapd MASK0_0( % rip), % ymm4 # qhasm: mask5 aligned= mem256[ MASK0_1 ] # asm 1: vmovapd MASK0_1(%rip),>mask5=reg256#6 # asm 2: vmovapd MASK0_1(%rip),>mask5=%ymm5 vmovapd MASK0_1( % rip), % ymm5 # qhasm: x0 = mem256[ input_0 + 0 ] # asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6 vmovupd 0( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 32 ] # asm 1: vmovupd 32(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 32(<input_0=%rdi),>x1=%ymm7 vmovupd 32( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 64 ] # asm 1: vmovupd 64(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 64(<input_0=%rdi),>x2=%ymm8 vmovupd 64( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 96 ] # asm 1: vmovupd 
96(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 96(<input_0=%rdi),>x3=%ymm9 vmovupd 96( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 128 ] # asm 1: vmovupd 128(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 128(<input_0=%rdi),>x4=%ymm10 vmovupd 128( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 160 ] # asm 1: vmovupd 160(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 160(<input_0=%rdi),>x5=%ymm11 vmovupd 160( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 192 ] # asm 1: vmovupd 192(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 192(<input_0=%rdi),>x6=%ymm12 vmovupd 192( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 224 ] # asm 1: vmovupd 224(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 224(<input_0=%rdi),>x7=%ymm13 vmovupd 224( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand 
<x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: 
vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 
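#
# NOTE on the structure of this routine (explanatory comment; everything
# else in this file is qhasm output).  Every vpand/vpsllq/vpsrlq/vpor
# group above and below performs one shift-and-mask butterfly that
# exchanges bit fields between a pair of rows (a, b):
#
#     a' = (a & mask_lo) | ((b & mask_lo) << s)
#     b' = ((a & mask_hi) >> s) | (b & mask_hi)
#
# where mask_lo/mask_hi are the MASKn_0/MASKn_1 constants loaded via
# %rip above and s is the field width selected by the mask.  (Where a
# plain shift already discards the masked-off bits, as with the
# vpsllq $32 / vpsrlq $32 rounds, qhasm drops the redundant vpand.)
# The first half of the routine runs rounds with s = 32, 16, 8; after
# the MASK2_*/MASK1_*/MASK0_* reloads above, the rounds use s = 4, 2, 1.
# Within each group of eight loaded rows x0..x7, the pairs (x_i, x_i+4)
# take the widest shift of a round, (x_i, x_i+2) the middle one, and
# (x_i, x_i+1) the narrowest; this is the usual pattern for a 64x64
# bit-matrix transpose done in place, applied independently to each
# 64-bit lane of the ymm registers.  The two vpor instructions just
# below complete the s = 2 exchange between x5 and x7 prepared by the
# vpand/vpsllq/vpsrlq steps directly above.
#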
# qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 
1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 0 ] = x0 # asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi) vmovupd % ymm9, 0( % rdi) # qhasm: mem256[ input_0 + 32 ] = x1 # asm 1: vmovupd <x1=reg256#14,32(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,32(<input_0=%rdi) vmovupd % ymm13, 32( % rdi) # qhasm: mem256[ input_0 + 64 ] = x2 # asm 1: vmovupd <x2=reg256#15,64(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,64(<input_0=%rdi) vmovupd % ymm14, 64( % rdi) # qhasm: mem256[ input_0 + 96 ] = x3 # asm 1: vmovupd <x3=reg256#11,96(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,96(<input_0=%rdi) vmovupd % ymm10, 96( % rdi) # qhasm: mem256[ input_0 + 128 ] = x4 # asm 1: vmovupd <x4=reg256#12,128(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,128(<input_0=%rdi) vmovupd % ymm11, 128( % rdi) # qhasm: mem256[ input_0 + 160 ] = x5 # asm 1: vmovupd <x5=reg256#9,160(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,160(<input_0=%rdi) vmovupd % ymm8, 160( % rdi) # qhasm: mem256[ input_0 + 192 ] = x6 # asm 1: vmovupd <x6=reg256#13,192(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,192(<input_0=%rdi) vmovupd % ymm12, 192( % rdi) # qhasm: mem256[ input_0 + 224 ] = x7 # asm 1: vmovupd <x7=reg256#7,224(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,224(<input_0=%rdi) vmovupd % ymm6, 224( % rdi) # qhasm: x0 = mem256[ input_0 + 256 ] # asm 1: vmovupd 256(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 256(<input_0=%rdi),>x0=%ymm6 vmovupd 256( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 288 ] # asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7 vmovupd 288( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 320 ] # asm 1: vmovupd 
320(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 320(<input_0=%rdi),>x2=%ymm8 vmovupd 320( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 352 ] # asm 1: vmovupd 352(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 352(<input_0=%rdi),>x3=%ymm9 vmovupd 352( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 384 ] # asm 1: vmovupd 384(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 384(<input_0=%rdi),>x4=%ymm10 vmovupd 384( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 416 ] # asm 1: vmovupd 416(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 416(<input_0=%rdi),>x5=%ymm11 vmovupd 416( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 448 ] # asm 1: vmovupd 448(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 448(<input_0=%rdi),>x6=%ymm12 vmovupd 448( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 480 ] # asm 1: vmovupd 480(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 480(<input_0=%rdi),>x7=%ymm13 vmovupd 480( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand 
<x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: 
vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % 
ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: 
vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 256 ] = x0 # asm 1: vmovupd <x0=reg256#10,256(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,256(<input_0=%rdi) vmovupd % ymm9, 256( % rdi) # qhasm: mem256[ input_0 + 288 ] = x1 # asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi) vmovupd % ymm13, 288( % rdi) # qhasm: mem256[ input_0 + 320 ] = x2 # asm 1: vmovupd <x2=reg256#15,320(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,320(<input_0=%rdi) vmovupd % ymm14, 320( % rdi) # qhasm: mem256[ input_0 + 352 ] = x3 # asm 1: vmovupd <x3=reg256#11,352(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,352(<input_0=%rdi) vmovupd % ymm10, 352( % rdi) # qhasm: mem256[ input_0 + 384 ] = x4 # asm 1: vmovupd <x4=reg256#12,384(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,384(<input_0=%rdi) vmovupd % ymm11, 384( % rdi) # qhasm: mem256[ input_0 + 416 ] = x5 # asm 1: vmovupd <x5=reg256#9,416(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,416(<input_0=%rdi) vmovupd % ymm8, 416( % rdi) # qhasm: mem256[ input_0 + 448 ] = x6 # asm 1: vmovupd <x6=reg256#13,448(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,448(<input_0=%rdi) vmovupd % ymm12, 448( % rdi) # qhasm: mem256[ input_0 + 480 ] = x7 # asm 1: vmovupd <x7=reg256#7,480(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,480(<input_0=%rdi) vmovupd % ymm6, 480( % rdi) # qhasm: x0 = mem256[ input_0 + 512 ] # asm 1: vmovupd 512(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 512(<input_0=%rdi),>x0=%ymm6 vmovupd 512( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 544 ] # asm 
1: vmovupd 544(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 544(<input_0=%rdi),>x1=%ymm7 vmovupd 544( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 576 ] # asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8 vmovupd 576( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 608 ] # asm 1: vmovupd 608(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 608(<input_0=%rdi),>x3=%ymm9 vmovupd 608( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 640 ] # asm 1: vmovupd 640(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 640(<input_0=%rdi),>x4=%ymm10 vmovupd 640( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 672 ] # asm 1: vmovupd 672(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 672(<input_0=%rdi),>x5=%ymm11 vmovupd 672( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 704 ] # asm 1: vmovupd 704(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 704(<input_0=%rdi),>x6=%ymm12 vmovupd 704( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 736 ] # asm 1: vmovupd 736(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 736(<input_0=%rdi),>x7=%ymm13 vmovupd 736( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor 
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor % ymm7, % ymm11, % ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand % ymm8, % ymm0, % ymm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand % ymm12, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand % ymm8, % ymm1, % ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand % ymm12, % ymm1, % ymm12

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, % ymm8, % ymm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor % ymm11, % ymm15, % ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor % ymm8, % ymm12, % ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand % ymm9, % ymm0, % ymm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand % ymm13, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand % ymm9, % ymm1, % ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand % ymm13, % ymm1, % ymm13

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, % ymm9, % ymm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor % ymm12, % ymm15, % ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor % ymm9, % ymm13, % ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand % ymm14, % ymm2, % ymm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand % ymm11, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand % ymm14, % ymm3, % ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand % ymm11, % ymm3, % ymm11

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, % ymm14, % ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor % ymm13, % ymm15, % ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor % ymm14, % ymm11, % ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand % ymm10, % ymm2, % ymm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand % ymm12, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand % ymm10, % ymm3, % ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand % ymm12, % ymm3, % ymm12

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, % ymm10, % ymm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor % ymm14, % ymm15, % ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor % ymm10, % ymm12, % ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand % ymm6, % ymm2, % ymm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand % ymm8, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand % ymm6, % ymm3, % ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand % ymm8, % ymm3, % ymm8

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, % ymm6, % ymm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor % ymm12, % ymm15, % ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor % ymm6, % ymm8, % ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand % ymm7, % ymm2, % ymm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand % ymm9, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand % ymm7, % ymm3, % ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand % ymm9, % ymm3, % ymm9

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, % ymm7, % ymm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor % ymm8, % ymm15, % ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor % ymm7, % ymm9, % ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand % ymm13, % ymm4, % ymm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand % ymm14, % ymm4, % ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand % ymm13, % ymm5, % ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand % ymm14, % ymm5, % ymm14

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, % ymm13, % ymm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor % ymm9, % ymm15, % ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor % ymm13, % ymm14, % ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand % ymm11, % ymm4, % ymm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand % ymm10, % ymm4, % ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand % ymm11, % ymm5, % ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand % ymm10, % ymm5, % ymm10

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, % ymm11, % ymm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor % ymm14, % ymm15, % ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor % ymm11, % ymm10, % ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand % ymm12, % ymm4, % ymm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand % ymm8, % ymm4, % ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand % ymm12, % ymm5, % ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand % ymm8, % ymm5, % ymm8

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, % ymm12, % ymm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor % ymm11, % ymm15, % ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor % ymm12, % ymm8, % ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand % ymm6, % ymm4, % ymm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand % ymm7, % ymm4, % ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand % ymm6, % ymm5, % ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand % ymm7, % ymm5, % ymm7

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, % ymm6, % ymm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor % ymm12, % ymm15, % ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor % ymm6, % ymm7, % ymm6

# qhasm: mem256[ input_0 + 512 ] = x0
# asm 1: vmovupd <x0=reg256#10,512(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,512(<input_0=%rdi)
vmovupd % ymm9, 512( % rdi)

# qhasm: mem256[ input_0 + 544 ] = x1
# asm 1: vmovupd <x1=reg256#14,544(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,544(<input_0=%rdi)
vmovupd % ymm13, 544( % rdi)

# qhasm: mem256[ input_0 + 576 ] = x2
# asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi)
vmovupd % ymm14, 576( % rdi)

# qhasm: mem256[ input_0 + 608 ] = x3
# asm 1: vmovupd <x3=reg256#11,608(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,608(<input_0=%rdi)
vmovupd % ymm10, 608( % rdi)

# qhasm: mem256[ input_0 + 640 ] = x4
# asm 1: vmovupd <x4=reg256#12,640(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,640(<input_0=%rdi)
vmovupd % ymm11, 640( % rdi)

# qhasm: mem256[ input_0 + 672 ] = x5
# asm 1: vmovupd <x5=reg256#9,672(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,672(<input_0=%rdi)
vmovupd % ymm8, 672( % rdi)

# qhasm: mem256[ input_0 + 704 ] = x6
# asm 1: vmovupd <x6=reg256#13,704(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,704(<input_0=%rdi)
vmovupd % ymm12, 704( % rdi)

# qhasm: mem256[ input_0 + 736 ] = x7
# asm 1: vmovupd <x7=reg256#7,736(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,736(<input_0=%rdi)
vmovupd % ymm6, 736( % rdi)
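#
# Same load / masked-swap / store sequence, applied to the 256-byte block at
# byte offset 768.
#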
# qhasm: x0 = mem256[ input_0 + 768 ]
# asm 1: vmovupd 768(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 768(<input_0=%rdi),>x0=%ymm6
vmovupd 768( % rdi), % ymm6

# qhasm: x1 = mem256[ input_0 + 800 ]
# asm 1: vmovupd 800(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 800(<input_0=%rdi),>x1=%ymm7
vmovupd 800( % rdi), % ymm7

# qhasm: x2 = mem256[ input_0 + 832 ]
# asm 1: vmovupd 832(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 832(<input_0=%rdi),>x2=%ymm8
vmovupd 832( % rdi), % ymm8

# qhasm: x3 = mem256[ input_0 + 864 ]
# asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9
vmovupd 864( % rdi), % ymm9

# qhasm: x4 = mem256[ input_0 + 896 ]
# asm 1: vmovupd 896(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 896(<input_0=%rdi),>x4=%ymm10
vmovupd 896( % rdi), % ymm10

# qhasm: x5 = mem256[ input_0 + 928 ]
# asm 1: vmovupd 928(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 928(<input_0=%rdi),>x5=%ymm11
vmovupd 928( % rdi), % ymm11

# qhasm: x6 = mem256[ input_0 + 960 ]
# asm 1: vmovupd 960(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 960(<input_0=%rdi),>x6=%ymm12
vmovupd 960( % rdi), % ymm12

# qhasm: x7 = mem256[ input_0 + 992 ]
# asm 1: vmovupd 992(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 992(<input_0=%rdi),>x7=%ymm13
vmovupd 992( % rdi), % ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand % ymm6, % ymm0, % ymm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand % ymm10, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand % ymm6, % ymm1, % ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand % ymm10, % ymm1, % ymm10

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, % ymm6, % ymm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor % ymm14, % ymm15, % ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor % ymm6, % ymm10, % ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand % ymm7, % ymm0, % ymm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand % ymm11, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand % ymm7, % ymm1, % ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand % ymm11, % ymm1, % ymm11

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, % ymm7, % ymm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor % ymm10, % ymm15, % ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor % ymm7, % ymm11, % ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand % ymm8, % ymm0, % ymm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand % ymm12, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand % ymm8, % ymm1, % ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand % ymm12, % ymm1, % ymm12

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, % ymm8, % ymm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor % ymm11, % ymm15, % ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor % ymm8, % ymm12, % ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand % ymm9, % ymm0, % ymm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand % ymm13, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand % ymm9, % ymm1, % ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand % ymm13, % ymm1, % ymm13

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, % ymm9, % ymm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor % ymm12, % ymm15, % ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor % ymm9, % ymm13, % ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand % ymm14, % ymm2, % ymm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % 
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand % ymm7, % ymm2, % ymm8

# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand % ymm9, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand % ymm7, % ymm3, % ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand % ymm9, % ymm3, % ymm9

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, % ymm7, % ymm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor % ymm8, % ymm15, % ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor % ymm7, % ymm9, % ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand % ymm13, % ymm4, % ymm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand % ymm14, % ymm4, % ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand % ymm13, % ymm5, % ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand % ymm14, % ymm5, % ymm14

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, % ymm13, % ymm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor % ymm9, % ymm15, % ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor % ymm13, % ymm14, % ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand % ymm11, % ymm4, % ymm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand % ymm10, % ymm4, % ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand % ymm11, % ymm5, % ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand % ymm10, % ymm5, % ymm10

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, % ymm11, % ymm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor % ymm14, % ymm15, % ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor % ymm11, % ymm10, % ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand % ymm12, % ymm4, % ymm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand % ymm8, % ymm4, % ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand % ymm12, % ymm5, % ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand % ymm8, % ymm5, % ymm8

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, % ymm12, % ymm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor % ymm11, % ymm15, % ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor % ymm12, % ymm8, % ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand % ymm6, % ymm4, % ymm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand % ymm7, % ymm4, % ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand % ymm6, % ymm5, % ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand % ymm7, % ymm5, % ymm7

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, % ymm6, % ymm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor % ymm12, % ymm15, % ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor % ymm6, % ymm7, % ymm6

# qhasm: mem256[ input_0 + 768 ] = x0
# asm 1: vmovupd <x0=reg256#10,768(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,768(<input_0=%rdi)
vmovupd % ymm9, 768( % rdi)

# qhasm: mem256[ input_0 + 800 ] = x1
# asm 1: vmovupd <x1=reg256#14,800(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,800(<input_0=%rdi)
vmovupd % ymm13, 800( % rdi)

# qhasm: mem256[ input_0 + 832 ] = x2
# asm 1: vmovupd <x2=reg256#15,832(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,832(<input_0=%rdi)
vmovupd % ymm14, 832( % rdi)

# qhasm: mem256[ input_0 + 864 ] = x3
# asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi)
vmovupd % ymm10, 864( % rdi)

# qhasm: mem256[ input_0 + 896 ] = x4
# asm 1: vmovupd <x4=reg256#12,896(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,896(<input_0=%rdi)
vmovupd % ymm11, 896( % rdi)

# qhasm: mem256[ input_0 + 928 ] = x5
# asm 1: vmovupd <x5=reg256#9,928(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,928(<input_0=%rdi)
vmovupd % ymm8, 928( % rdi)

# qhasm: mem256[ input_0 + 960 ] = x6
# asm 1: vmovupd <x6=reg256#13,960(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,960(<input_0=%rdi)
vmovupd % ymm12, 960( % rdi)

# qhasm: mem256[ input_0 + 992 ] = x7
# asm 1: vmovupd <x7=reg256#7,992(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,992(<input_0=%rdi)
vmovupd % ymm6, 992( % rdi)
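#
# Same load / masked-swap / store sequence, applied to the 256-byte block at
# byte offset 1024.
#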
# qhasm: x0 = mem256[ input_0 + 1024 ]
# asm 1: vmovupd 1024(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 1024(<input_0=%rdi),>x0=%ymm6
vmovupd 1024( % rdi), % ymm6

# qhasm: x1 = mem256[ input_0 + 1056 ]
# asm 1: vmovupd 1056(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 1056(<input_0=%rdi),>x1=%ymm7
vmovupd 1056( % rdi), % ymm7

# qhasm: x2 = mem256[ input_0 + 1088 ]
# asm 1: vmovupd 1088(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 1088(<input_0=%rdi),>x2=%ymm8
vmovupd 1088( % rdi), % ymm8

# qhasm: x3 = mem256[ input_0 + 1120 ]
# asm 1: vmovupd 1120(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 1120(<input_0=%rdi),>x3=%ymm9
vmovupd 1120( % rdi), % ymm9

# qhasm: x4 = mem256[ input_0 + 1152 ]
# asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10
vmovupd 1152( % rdi), % ymm10

# qhasm: x5 = mem256[ input_0 + 1184 ]
# asm 1: vmovupd 1184(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1184(<input_0=%rdi),>x5=%ymm11
vmovupd 1184( % rdi), % ymm11

# qhasm: x6 = mem256[ input_0 + 1216 ]
# asm 1: vmovupd 1216(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1216(<input_0=%rdi),>x6=%ymm12
vmovupd 1216( % rdi), % ymm12

# qhasm: x7 = mem256[ input_0 + 1248 ]
# asm 1: vmovupd 1248(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1248(<input_0=%rdi),>x7=%ymm13
vmovupd 1248( % rdi), % ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand % ymm6, % ymm0, % ymm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand % ymm10, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand % ymm6, % ymm1, % ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand % ymm10, % ymm1, % ymm10

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, % ymm6, % ymm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor % ymm14, % ymm15, % ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor % ymm6, % ymm10, % ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand % ymm7, % ymm0, % ymm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand % ymm11, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand % ymm7, % ymm1, % ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand % ymm11, % ymm1, % ymm11
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, % ymm7, % ymm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor % ymm10, % ymm15, % ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor % ymm7, % ymm11, % ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand % ymm8, % ymm0, % ymm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand % ymm12, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand % ymm8, % ymm1, % ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand % ymm12, % ymm1, % ymm12

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, % ymm8, % ymm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor % ymm11, % ymm15, % ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor % ymm8, % ymm12, % ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand % ymm9, % ymm0, % ymm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand % ymm13, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand % ymm9, % ymm1, % ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand % ymm13, % ymm1, % ymm13

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, % ymm9, % ymm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor % ymm12, % ymm15, % ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor % ymm9, % ymm13, % ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand % ymm14, % ymm2, % ymm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand % ymm11, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand % ymm14, % ymm3, % ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand % ymm11, % ymm3, % ymm11

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, % ymm14, % ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor % ymm13, % ymm15, % ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor % ymm14, % ymm11, % ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand % ymm10, % ymm2, % ymm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand % ymm12, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand % ymm10, % ymm3, % ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand % ymm12, % ymm3, % ymm12

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, % ymm10, % ymm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor % ymm14, % ymm15, % ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor % ymm10, % ymm12, % ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand % ymm6, % ymm2, % ymm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand % ymm8, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand % ymm6, % ymm3, % ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand % ymm8, % ymm3, % ymm8

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, % ymm6, % ymm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor % ymm12, % ymm15, % ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor % ymm6, % ymm8, % ymm6

# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand % ymm7, % ymm2, % ymm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand % ymm9, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15

# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand % ymm7, % ymm3, % ymm7

# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand % ymm9, % ymm3, % ymm9

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, % ymm7, % ymm7

# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor % ymm8, % ymm15, % ymm8

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor % ymm7, % ymm9, % ymm7

# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand % ymm13, % ymm4, % ymm9

# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand % ymm14, % ymm4, % ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand % ymm13, % ymm5, % ymm13

# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand % ymm14, % ymm5, % ymm14

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, % ymm13, % ymm13

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor % ymm9, % ymm15, % ymm9

# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor % ymm13, % ymm14, % ymm13

# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand % ymm11, % ymm4, % ymm14

# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand % ymm10, % ymm4, % ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand % ymm11, % ymm5, % ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand % ymm10, % ymm5, % ymm10

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, % ymm11, % ymm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor % ymm14, % ymm15, % ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor % ymm11, % ymm10, % ymm10

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand % ymm12, % ymm4, % ymm11

# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand % ymm8, % ymm4, % ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand % ymm12, % ymm5, % ymm12

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand % ymm8, % ymm5, % ymm8

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, % ymm12, % ymm12

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor % ymm11, % ymm15, % ymm11

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor % ymm12, % ymm8, % ymm8

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand % ymm6, % ymm4, % ymm12

# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand % ymm7, % ymm4, % ymm15

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, % ymm15, % ymm15

# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand % ymm6, % ymm5, % ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand % ymm7, % ymm5, % ymm7

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, % ymm6, % ymm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor % ymm12, % ymm15, % ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor % ymm6, % ymm7, % ymm6

# qhasm: mem256[ input_0 + 1024 ] = x0
# asm 1: vmovupd <x0=reg256#10,1024(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,1024(<input_0=%rdi)
vmovupd % ymm9, 1024( % rdi)

# qhasm: mem256[ input_0 + 1056 ] = x1
# asm 1: vmovupd <x1=reg256#14,1056(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,1056(<input_0=%rdi)
vmovupd % ymm13, 1056( % rdi)

# qhasm: mem256[ input_0 + 1088 ] = x2
# asm 1: vmovupd <x2=reg256#15,1088(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,1088(<input_0=%rdi)
vmovupd % ymm14, 1088( % rdi)

# qhasm: mem256[ input_0 + 1120 ] = x3
# asm 1: vmovupd <x3=reg256#11,1120(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,1120(<input_0=%rdi)
vmovupd % ymm10, 1120( % rdi)

# qhasm: mem256[ input_0 + 1152 ] = x4
# asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi)
vmovupd % ymm11, 1152( % rdi)

# qhasm: mem256[ input_0 + 1184 ] = x5
# asm 1: vmovupd <x5=reg256#9,1184(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1184(<input_0=%rdi)
vmovupd % ymm8, 1184( % rdi)

# qhasm: mem256[ input_0 + 1216 ] = x6
# asm 1: vmovupd <x6=reg256#13,1216(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1216(<input_0=%rdi)
vmovupd % ymm12, 1216( % rdi)

# qhasm: mem256[ input_0 + 1248 ] = x7
# asm 1: vmovupd <x7=reg256#7,1248(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1248(<input_0=%rdi)
vmovupd % ymm6, 1248( % rdi)
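#
# Same load / masked-swap / store sequence, applied to the 256-byte block at
# byte offset 1280.
#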
# qhasm: x0 = mem256[ input_0 + 1280 ]
# asm 1: vmovupd 1280(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 1280(<input_0=%rdi),>x0=%ymm6
vmovupd 1280( % rdi), % ymm6

# qhasm: x1 = mem256[ input_0 + 1312 ]
# asm 1: vmovupd 1312(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 1312(<input_0=%rdi),>x1=%ymm7
vmovupd 1312( % rdi), % ymm7

# qhasm: x2 = mem256[ input_0 + 1344 ]
# asm 1: vmovupd 1344(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 1344(<input_0=%rdi),>x2=%ymm8
vmovupd 1344( % rdi), % ymm8

# qhasm: x3 = mem256[ input_0 + 1376 ]
# asm 1: vmovupd 1376(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 1376(<input_0=%rdi),>x3=%ymm9
vmovupd 1376( % rdi), % ymm9

# qhasm: x4 = mem256[ input_0 + 1408 ]
# asm 1: vmovupd 1408(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1408(<input_0=%rdi),>x4=%ymm10
vmovupd 1408( % rdi), % ymm10

# qhasm: x5 = mem256[ input_0 + 1440 ]
# asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11
vmovupd 1440( % rdi), % ymm11

# qhasm: x6 = mem256[ input_0 + 1472 ]
# asm 1: vmovupd 1472(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1472(<input_0=%rdi),>x6=%ymm12
vmovupd 1472( % rdi), % ymm12

# qhasm: x7 = mem256[ input_0 + 1504 ]
# asm 1: vmovupd 1504(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1504(<input_0=%rdi),>x7=%ymm13
vmovupd 1504( % rdi), % ymm13

# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand % ymm6, % ymm0, % ymm14

# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand % ymm10, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand % ymm6, % ymm1, % ymm6

# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand % ymm10, % ymm1, % ymm10

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, % ymm6, % ymm6

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor % ymm14, % ymm15, % ymm14

# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor % ymm6, % ymm10, % ymm6

# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand % ymm7, % ymm0, % ymm10

# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand % ymm11, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand % ymm7, % ymm1, % ymm7

# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand % ymm11, % ymm1, % ymm11

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, % ymm7, % ymm7

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor % ymm10, % ymm15, % ymm10

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor % ymm7, % ymm11, % ymm7

# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand % ymm8, % ymm0, % ymm11

# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand % ymm12, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand % ymm8, % ymm1, % ymm8

# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand % ymm12, % ymm1, % ymm12

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, % ymm8, % ymm8

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor % ymm11, % ymm15, % ymm11

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor % ymm8, % ymm12, % ymm8

# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand % ymm9, % ymm0, % ymm12

# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand % ymm13, % ymm0, % ymm15

# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, % ymm15, % ymm15

# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand % ymm9, % ymm1, % ymm9

# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand % ymm13, % ymm1, % ymm13

# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, % ymm9, % ymm9

# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor % ymm12, % ymm15, % ymm12

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor % ymm9, % ymm13, % ymm9

# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand % ymm14, % ymm2, % ymm13

# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand % ymm11, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15

# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand % ymm14, % ymm3, % ymm14

# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand % ymm11, % ymm3, % ymm11

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, % ymm14, % ymm14

# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor % ymm13, % ymm15, % ymm13

# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor % ymm14, % ymm11, % ymm11

# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand % ymm10, % ymm2, % ymm14

# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand % ymm12, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15

# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand % ymm10, % ymm3, % ymm10

# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand % ymm12, % ymm3, % ymm12

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, % ymm10, % ymm10

# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor % ymm14, % ymm15, % ymm14

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor % ymm10, % ymm12, % ymm10

# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand % ymm6, % ymm2, % ymm12

# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand % ymm8, % ymm2, % ymm15

# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, % ymm15, % ymm15

# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand % ymm6, % ymm3, % ymm6

# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand % ymm8, % ymm3, % ymm8

# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, % ymm6, % ymm6

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor % ymm12, % ymm15, % ymm12

# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor % ymm6, % ymm8, % ymm6
ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor 
<v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1280 ] = x0 # asm 1: vmovupd <x0=reg256#10,1280(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1280(<input_0=%rdi) vmovupd % ymm9, 1280( % rdi) # qhasm: mem256[ input_0 + 1312 ] = x1 # asm 1: vmovupd <x1=reg256#14,1312(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1312(<input_0=%rdi) vmovupd % ymm13, 1312( % rdi) # qhasm: mem256[ input_0 + 1344 ] = x2 # asm 1: vmovupd <x2=reg256#15,1344(<input_0=int64#1) # asm 2: vmovupd <x2=%ymm14,1344(<input_0=%rdi) vmovupd % ymm14, 1344( % rdi) # qhasm: mem256[ input_0 + 1376 ] = x3 # asm 1: vmovupd <x3=reg256#11,1376(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1376(<input_0=%rdi) vmovupd % ymm10, 1376( % rdi) # qhasm: mem256[ input_0 + 1408 ] = x4 # asm 1: vmovupd 
<x4=reg256#12,1408(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1408(<input_0=%rdi) vmovupd % ymm11, 1408( % rdi) # qhasm: mem256[ input_0 + 1440 ] = x5 # asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi) vmovupd % ymm8, 1440( % rdi) # qhasm: mem256[ input_0 + 1472 ] = x6 # asm 1: vmovupd <x6=reg256#13,1472(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1472(<input_0=%rdi) vmovupd % ymm12, 1472( % rdi) # qhasm: mem256[ input_0 + 1504 ] = x7 # asm 1: vmovupd <x7=reg256#7,1504(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1504(<input_0=%rdi) vmovupd % ymm6, 1504( % rdi) # qhasm: x0 = mem256[ input_0 + 1536 ] # asm 1: vmovupd 1536(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1536(<input_0=%rdi),>x0=%ymm6 vmovupd 1536( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1568 ] # asm 1: vmovupd 1568(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1568(<input_0=%rdi),>x1=%ymm7 vmovupd 1568( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1600 ] # asm 1: vmovupd 1600(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1600(<input_0=%rdi),>x2=%ymm8 vmovupd 1600( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1632 ] # asm 1: vmovupd 1632(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1632(<input_0=%rdi),>x3=%ymm9 vmovupd 1632( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1664 ] # asm 1: vmovupd 1664(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1664(<input_0=%rdi),>x4=%ymm10 vmovupd 1664( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1696 ] # asm 1: vmovupd 1696(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1696(<input_0=%rdi),>x5=%ymm11 vmovupd 1696( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1728 ] # asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12 vmovupd 1728( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 1760 ] # asm 1: vmovupd 1760(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 1760(<input_0=%rdi),>x7=%ymm13 vmovupd 1760( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand 
% ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15 vpand % ymm13, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13 vpand % ymm13, % ymm1, % ymm13 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10 # asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9 vpor % ymm9, % ymm13, % ymm9 # qhasm: v00 = x0 & mask2 # asm 1: vpand 
<x0=reg256#15,<mask2=reg256#3,>v00=reg256#14 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13 vpand % ymm14, % ymm2, % ymm13 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15 vpand % ymm11, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14 vpand % ymm14, % ymm3, % ymm14 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15 # asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14 vpsrlq $2, % ymm14, % ymm14 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14 # asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13 vpor % ymm13, % ymm15, % ymm13 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11 vpor % ymm14, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14 vpand % ymm10, % ymm2, % ymm14 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15 vpand % ymm12, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13 # asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12 vpand % ymm12, % ymm3, % ymm12 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11 # asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10 vpor % ymm10, % ymm12, % ymm10 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12 vpand % ymm6, % ymm2, % ymm12 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15 vpand % ymm8, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor 
<v00=reg256#13,<v10=reg256#16,>x4=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16 # asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15 vpand % ymm9, % ymm2, % ymm15 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15 vpsllq $2, % ymm15, % ymm15 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10 # asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9 vpand % ymm9, % ymm3, % ymm9 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9 # asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8 vpor % ymm8, % ymm15, % ymm8 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7 vpor % ymm7, % ymm9, % ymm7 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10 # asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9 vpand % ymm13, % ymm4, % ymm9 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15 vpand % ymm14, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14 # asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13 vpand % ymm13, % ymm5, % ymm13 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15 # asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14 vpand % ymm14, % ymm5, % ymm14 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13 vpsrlq $1, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9 vpor % ymm9, % ymm15, % ymm9 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14 # asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13 vpor % ymm13, % ymm14, % ymm13 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14 vpand % ymm11, % ymm4, % ymm14 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15 vpand % ymm10, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 vpand % ymm11, % ymm5, % ymm11 # qhasm: v11 = x3 & mask5 # asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11 # asm 2: 
vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10 vpand % ymm10, % ymm5, % ymm10 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12 # asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11 vpsrlq $1, % ymm11, % ymm11 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11 # asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10 vpor % ymm11, % ymm10, % ymm10 # qhasm: v00 = x4 & mask4 # asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12 # asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11 vpand % ymm12, % ymm4, % ymm11 # qhasm: v10 = x5 & mask4 # asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15 vpand % ymm8, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x4 & mask5 # asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13 # asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12 vpand % ymm12, % ymm5, % ymm12 # qhasm: v11 = x5 & mask5 # asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9 # asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8 vpand % ymm8, % ymm5, % ymm8 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13 # asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12 vpsrlq $1, % ymm12, % ymm12 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9 # asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8 vpor % ymm12, % ymm8, % ymm8 # qhasm: v00 = x6 & mask4 # asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13 # asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12 vpand % ymm6, % ymm4, % ymm12 # qhasm: v10 = x7 & mask4 # asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16 # asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15 vpand % ymm7, % ymm4, % ymm15 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15 vpsllq $1, % ymm15, % ymm15 # qhasm: v01 = x6 & mask5 # asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7 # asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6 vpand % ymm6, % ymm5, % ymm6 # qhasm: v11 = x7 & mask5 # asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8 # asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7 vpand % ymm7, % ymm5, % ymm7 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6 vpsrlq $1, % ymm6, % ymm6 # qhasm: x6 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12 vpor % ymm12, % ymm15, % ymm12 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6 vpor % ymm6, % ymm7, % ymm6 # qhasm: mem256[ input_0 + 1536 ] = x0 # asm 1: vmovupd <x0=reg256#10,1536(<input_0=int64#1) # asm 2: vmovupd <x0=%ymm9,1536(<input_0=%rdi) vmovupd % ymm9, 1536( % rdi) # qhasm: mem256[ input_0 + 1568 ] = x1 # asm 1: vmovupd <x1=reg256#14,1568(<input_0=int64#1) # asm 2: vmovupd <x1=%ymm13,1568(<input_0=%rdi) vmovupd % ymm13, 1568( % rdi) # qhasm: mem256[ input_0 + 1600 ] = x2 # asm 1: vmovupd <x2=reg256#15,1600(<input_0=int64#1) # asm 2: vmovupd 
<x2=%ymm14,1600(<input_0=%rdi) vmovupd % ymm14, 1600( % rdi) # qhasm: mem256[ input_0 + 1632 ] = x3 # asm 1: vmovupd <x3=reg256#11,1632(<input_0=int64#1) # asm 2: vmovupd <x3=%ymm10,1632(<input_0=%rdi) vmovupd % ymm10, 1632( % rdi) # qhasm: mem256[ input_0 + 1664 ] = x4 # asm 1: vmovupd <x4=reg256#12,1664(<input_0=int64#1) # asm 2: vmovupd <x4=%ymm11,1664(<input_0=%rdi) vmovupd % ymm11, 1664( % rdi) # qhasm: mem256[ input_0 + 1696 ] = x5 # asm 1: vmovupd <x5=reg256#9,1696(<input_0=int64#1) # asm 2: vmovupd <x5=%ymm8,1696(<input_0=%rdi) vmovupd % ymm8, 1696( % rdi) # qhasm: mem256[ input_0 + 1728 ] = x6 # asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1) # asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi) vmovupd % ymm12, 1728( % rdi) # qhasm: mem256[ input_0 + 1760 ] = x7 # asm 1: vmovupd <x7=reg256#7,1760(<input_0=int64#1) # asm 2: vmovupd <x7=%ymm6,1760(<input_0=%rdi) vmovupd % ymm6, 1760( % rdi) # qhasm: x0 = mem256[ input_0 + 1792 ] # asm 1: vmovupd 1792(<input_0=int64#1),>x0=reg256#7 # asm 2: vmovupd 1792(<input_0=%rdi),>x0=%ymm6 vmovupd 1792( % rdi), % ymm6 # qhasm: x1 = mem256[ input_0 + 1824 ] # asm 1: vmovupd 1824(<input_0=int64#1),>x1=reg256#8 # asm 2: vmovupd 1824(<input_0=%rdi),>x1=%ymm7 vmovupd 1824( % rdi), % ymm7 # qhasm: x2 = mem256[ input_0 + 1856 ] # asm 1: vmovupd 1856(<input_0=int64#1),>x2=reg256#9 # asm 2: vmovupd 1856(<input_0=%rdi),>x2=%ymm8 vmovupd 1856( % rdi), % ymm8 # qhasm: x3 = mem256[ input_0 + 1888 ] # asm 1: vmovupd 1888(<input_0=int64#1),>x3=reg256#10 # asm 2: vmovupd 1888(<input_0=%rdi),>x3=%ymm9 vmovupd 1888( % rdi), % ymm9 # qhasm: x4 = mem256[ input_0 + 1920 ] # asm 1: vmovupd 1920(<input_0=int64#1),>x4=reg256#11 # asm 2: vmovupd 1920(<input_0=%rdi),>x4=%ymm10 vmovupd 1920( % rdi), % ymm10 # qhasm: x5 = mem256[ input_0 + 1952 ] # asm 1: vmovupd 1952(<input_0=int64#1),>x5=reg256#12 # asm 2: vmovupd 1952(<input_0=%rdi),>x5=%ymm11 vmovupd 1952( % rdi), % ymm11 # qhasm: x6 = mem256[ input_0 + 1984 ] # asm 1: vmovupd 1984(<input_0=int64#1),>x6=reg256#13 # asm 2: vmovupd 1984(<input_0=%rdi),>x6=%ymm12 vmovupd 1984( % rdi), % ymm12 # qhasm: x7 = mem256[ input_0 + 2016 ] # asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14 # asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13 vmovupd 2016( % rdi), % ymm13 # qhasm: v00 = x0 & mask0 # asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15 # asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14 vpand % ymm6, % ymm0, % ymm14 # qhasm: v10 = x4 & mask0 # asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15 vpand % ymm10, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x0 & mask1 # asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7 # asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6 vpand % ymm6, % ymm1, % ymm6 # qhasm: v11 = x4 & mask1 # asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11 # asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10 vpand % ymm10, % ymm1, % ymm10 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6 vpsrlq $4, % ymm6, % ymm6 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15 # asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14 vpor % ymm14, % ymm15, % ymm14 # qhasm: x4 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6 vpor % ymm6, % ymm10, % ymm6 # qhasm: v00 = x1 & 
mask0 # asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11 # asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10 vpand % ymm7, % ymm0, % ymm10 # qhasm: v10 = x5 & mask0 # asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15 vpand % ymm11, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x1 & mask1 # asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8 # asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7 vpand % ymm7, % ymm1, % ymm7 # qhasm: v11 = x5 & mask1 # asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12 # asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11 vpand % ymm11, % ymm1, % ymm11 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7 vpsrlq $4, % ymm7, % ymm7 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10 vpor % ymm10, % ymm15, % ymm10 # qhasm: x5 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8 # asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7 vpor % ymm7, % ymm11, % ymm7 # qhasm: v00 = x2 & mask0 # asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12 # asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11 vpand % ymm8, % ymm0, % ymm11 # qhasm: v10 = x6 & mask0 # asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16 # asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15 vpand % ymm12, % ymm0, % ymm15 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16 # asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15 vpsllq $4, % ymm15, % ymm15 # qhasm: v01 = x2 & mask1 # asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9 # asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8 vpand % ymm8, % ymm1, % ymm8 # qhasm: v11 = x6 & mask1 # asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13 # asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12 vpand % ymm12, % ymm1, % ymm12 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8 vpsrlq $4, % ymm8, % ymm8 # qhasm: x2 = v00 | v10 # asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12 # asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11 vpor % ymm11, % ymm15, % ymm11 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9 # asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8 vpor % ymm8, % ymm12, % ymm8 # qhasm: v00 = x3 & mask0 # asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13 # asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12 vpand % ymm9, % ymm0, % ymm12 # qhasm: v10 = x7 & mask0 # asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#1 # asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm0 vpand % ymm13, % ymm0, % ymm0 # qhasm: 4x v10 <<= 4 # asm 1: vpsllq $4,<v10=reg256#1,<v10=reg256#1 # asm 2: vpsllq $4,<v10=%ymm0,<v10=%ymm0 vpsllq $4, % ymm0, % ymm0 # qhasm: v01 = x3 & mask1 # asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10 # asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9 vpand % ymm9, % ymm1, % ymm9 # qhasm: v11 = x7 & mask1 # asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2 # asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1 vpand % ymm13, % ymm1, % ymm1 # qhasm: 4x v01 unsigned>>= 4 # asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10 # asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9 vpsrlq $4, % ymm9, % ymm9 # qhasm: x3 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#1,>x3=reg256#1 # asm 2: vpor 
<v00=%ymm12,<v10=%ymm0,>x3=%ymm0 vpor % ymm12, % ymm0, % ymm0 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1 vpor % ymm9, % ymm1, % ymm1 # qhasm: v00 = x0 & mask2 # asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10 # asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9 vpand % ymm14, % ymm2, % ymm9 # qhasm: v10 = x2 & mask2 # asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#13 # asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm12 vpand % ymm11, % ymm2, % ymm12 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#13,<v10=reg256#13 # asm 2: vpsllq $2,<v10=%ymm12,<v10=%ymm12 vpsllq $2, % ymm12, % ymm12 # qhasm: v01 = x0 & mask3 # asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#14 # asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm13 vpand % ymm14, % ymm3, % ymm13 # qhasm: v11 = x2 & mask3 # asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12 # asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11 vpand % ymm11, % ymm3, % ymm11 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#14,<v01=reg256#14 # asm 2: vpsrlq $2,<v01=%ymm13,<v01=%ymm13 vpsrlq $2, % ymm13, % ymm13 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10 # asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9 vpor % ymm9, % ymm12, % ymm9 # qhasm: x2 = v01 | v11 # asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12 # asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11 vpor % ymm13, % ymm11, % ymm11 # qhasm: v00 = x1 & mask2 # asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13 # asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12 vpand % ymm10, % ymm2, % ymm12 # qhasm: v10 = x3 & mask2 # asm 1: vpand <x3=reg256#1,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x3=%ymm0,<mask2=%ymm2,>v10=%ymm13 vpand % ymm0, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x1 & mask3 # asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11 # asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10 vpand % ymm10, % ymm3, % ymm10 # qhasm: v11 = x3 & mask3 # asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1 # asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0 vpand % ymm0, % ymm3, % ymm0 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11 # asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10 vpsrlq $2, % ymm10, % ymm10 # qhasm: x1 = v00 | v10 # asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13 # asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12 vpor % ymm12, % ymm13, % ymm12 # qhasm: x3 = v01 | v11 # asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1 # asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0 vpor % ymm10, % ymm0, % ymm0 # qhasm: v00 = x4 & mask2 # asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11 # asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10 vpand % ymm6, % ymm2, % ymm10 # qhasm: v10 = x6 & mask2 # asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#14 # asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm13 vpand % ymm8, % ymm2, % ymm13 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14 # asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13 vpsllq $2, % ymm13, % ymm13 # qhasm: v01 = x4 & mask3 # asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7 # asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6 vpand % ymm6, % ymm3, % ymm6 # qhasm: v11 = x6 & mask3 # asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9 # asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8 vpand % ymm8, % 
ymm3, % ymm8 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7 # asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6 vpsrlq $2, % ymm6, % ymm6 # qhasm: x4 = v00 | v10 # asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11 # asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10 vpor % ymm10, % ymm13, % ymm10 # qhasm: x6 = v01 | v11 # asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7 # asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6 vpor % ymm6, % ymm8, % ymm6 # qhasm: v00 = x5 & mask2 # asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9 # asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8 vpand % ymm7, % ymm2, % ymm8 # qhasm: v10 = x7 & mask2 # asm 1: vpand <x7=reg256#2,<mask2=reg256#3,>v10=reg256#3 # asm 2: vpand <x7=%ymm1,<mask2=%ymm2,>v10=%ymm2 vpand % ymm1, % ymm2, % ymm2 # qhasm: 4x v10 <<= 2 # asm 1: vpsllq $2,<v10=reg256#3,<v10=reg256#3 # asm 2: vpsllq $2,<v10=%ymm2,<v10=%ymm2 vpsllq $2, % ymm2, % ymm2 # qhasm: v01 = x5 & mask3 # asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8 # asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7 vpand % ymm7, % ymm3, % ymm7 # qhasm: v11 = x7 & mask3 # asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2 # asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1 vpand % ymm1, % ymm3, % ymm1 # qhasm: 4x v01 unsigned>>= 2 # asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8 # asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7 vpsrlq $2, % ymm7, % ymm7 # qhasm: x5 = v00 | v10 # asm 1: vpor <v00=reg256#9,<v10=reg256#3,>x5=reg256#3 # asm 2: vpor <v00=%ymm8,<v10=%ymm2,>x5=%ymm2 vpor % ymm8, % ymm2, % ymm2 # qhasm: x7 = v01 | v11 # asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2 # asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1 vpor % ymm7, % ymm1, % ymm1 # qhasm: v00 = x0 & mask4 # asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4 # asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3 vpand % ymm9, % ymm4, % ymm3 # qhasm: v10 = x1 & mask4 # asm 1: vpand <x1=reg256#13,<mask4=reg256#5,>v10=reg256#8 # asm 2: vpand <x1=%ymm12,<mask4=%ymm4,>v10=%ymm7 vpand % ymm12, % ymm4, % ymm7 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#8,<v10=reg256#8 # asm 2: vpsllq $1,<v10=%ymm7,<v10=%ymm7 vpsllq $1, % ymm7, % ymm7 # qhasm: v01 = x0 & mask5 # asm 1: vpand <x0=reg256#10,<mask5=reg256#6,>v01=reg256#9 # asm 2: vpand <x0=%ymm9,<mask5=%ymm5,>v01=%ymm8 vpand % ymm9, % ymm5, % ymm8 # qhasm: v11 = x1 & mask5 # asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10 # asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9 vpand % ymm12, % ymm5, % ymm9 # qhasm: 4x v01 unsigned>>= 1 # asm 1: vpsrlq $1,<v01=reg256#9,<v01=reg256#9 # asm 2: vpsrlq $1,<v01=%ymm8,<v01=%ymm8 vpsrlq $1, % ymm8, % ymm8 # qhasm: x0 = v00 | v10 # asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4 # asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3 vpor % ymm3, % ymm7, % ymm3 # qhasm: x1 = v01 | v11 # asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8 # asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7 vpor % ymm8, % ymm9, % ymm7 # qhasm: v00 = x2 & mask4 # asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9 # asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8 vpand % ymm11, % ymm4, % ymm8 # qhasm: v10 = x3 & mask4 # asm 1: vpand <x3=reg256#1,<mask4=reg256#5,>v10=reg256#10 # asm 2: vpand <x3=%ymm0,<mask4=%ymm4,>v10=%ymm9 vpand % ymm0, % ymm4, % ymm9 # qhasm: 4x v10 <<= 1 # asm 1: vpsllq $1,<v10=reg256#10,<v10=reg256#10 # asm 2: vpsllq $1,<v10=%ymm9,<v10=%ymm9 vpsllq $1, % ymm9, % ymm9 # qhasm: v01 = x2 & mask5 # asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12 # asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11 
vpand %ymm11,%ymm5,%ymm11

# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0
vpand %ymm0,%ymm5,%ymm0

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1,%ymm11,%ymm11

# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8
vpor %ymm8,%ymm9,%ymm8

# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0
vpor %ymm11,%ymm0,%ymm0

# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9
vpand %ymm10,%ymm4,%ymm9

# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#3,<mask4=reg256#5,>v10=reg256#12
# asm 2: vpand <x5=%ymm2,<mask4=%ymm4,>v10=%ymm11
vpand %ymm2,%ymm4,%ymm11

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#12,<v10=reg256#12
# asm 2: vpsllq $1,<v10=%ymm11,<v10=%ymm11
vpsllq $1,%ymm11,%ymm11

# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#11,<mask5=reg256#6,>v01=reg256#11
# asm 2: vpand <x4=%ymm10,<mask5=%ymm5,>v01=%ymm10
vpand %ymm10,%ymm5,%ymm10

# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3
# asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2
vpand %ymm2,%ymm5,%ymm2

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $1,<v01=%ymm10,<v01=%ymm10
vpsrlq $1,%ymm10,%ymm10

# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9
vpor %ymm9,%ymm11,%ymm9

# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3
# asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2
vpor %ymm10,%ymm2,%ymm2

# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#11
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm10
vpand %ymm6,%ymm4,%ymm10

# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#2,<mask4=reg256#5,>v10=reg256#5
# asm 2: vpand <x7=%ymm1,<mask4=%ymm4,>v10=%ymm4
vpand %ymm1,%ymm4,%ymm4

# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#5,<v10=reg256#5
# asm 2: vpsllq $1,<v10=%ymm4,<v10=%ymm4
vpsllq $1,%ymm4,%ymm4

# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6,%ymm5,%ymm6

# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1
vpand %ymm1,%ymm5,%ymm1

# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1,%ymm6,%ymm6

# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#5,>x6=reg256#5
# asm 2: vpor <v00=%ymm10,<v10=%ymm4,>x6=%ymm4
vpor %ymm10,%ymm4,%ymm4

# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1
vpor %ymm6,%ymm1,%ymm1

# qhasm: mem256[ input_0 + 1792 ] = x0
# asm 1: vmovupd <x0=reg256#4,1792(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm3,1792(<input_0=%rdi)
vmovupd %ymm3,1792(%rdi)

# qhasm: mem256[ input_0 + 1824 ] = x1
# asm 1: vmovupd <x1=reg256#8,1824(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm7,1824(<input_0=%rdi)
vmovupd %ymm7,1824(%rdi)

# qhasm: mem256[ input_0 + 1856 ] = x2
# asm 1: vmovupd <x2=reg256#9,1856(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm8,1856(<input_0=%rdi)
vmovupd %ymm8,1856(%rdi)

# qhasm: mem256[ input_0 + 1888 ] = x3
# asm 1: vmovupd <x3=reg256#1,1888(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm0,1888(<input_0=%rdi)
vmovupd %ymm0,1888(%rdi)

# qhasm: mem256[ input_0 + 1920 ] = x4
# asm 1: vmovupd <x4=reg256#10,1920(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm9,1920(<input_0=%rdi)
vmovupd %ymm9,1920(%rdi)

# qhasm: mem256[ input_0 + 1952 ] = x5
# asm 1: vmovupd <x5=reg256#3,1952(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm2,1952(<input_0=%rdi)
vmovupd %ymm2,1952(%rdi)

# qhasm: mem256[ input_0 + 1984 ] = x6
# asm 1: vmovupd <x6=reg256#5,1984(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm4,1984(<input_0=%rdi)
vmovupd %ymm4,1984(%rdi)

# qhasm: mem256[ input_0 + 2016 ] = x7
# asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi)
vmovupd %ymm1,2016(%rdi)

# qhasm: return
add %r11,%rsp
ret
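The kernel that just returned is one full pass of a bitsliced transpose: every step ANDs two ymm registers against a complementary mask pair, shifts the selected bits past each other (by 4, then 2, then 1), and ORs the halves back together. As a reading aid, here is a minimal scalar sketch of that butterfly in C on uint64_t words instead of 256-bit registers; the helper name `butterfly`, the pairing loops, and the test values in `main` are invented for this illustration and do not come from the repository.

#include <stdint.h>
#include <stdio.h>

/* One butterfly step of the bit-interleave above: keep the bits that
 * "lo" selects, and swap the complementary bits between x and y across
 * a shift of s.  lo mirrors the MASK*_0-style constants defined in
 * consts.S (0x0F0F... for s = 4, 0x3333... for s = 2, 0x5555... for
 * s = 1); hi = lo << s mirrors the MASK*_1 constants. */
static void butterfly(uint64_t *x, uint64_t *y, uint64_t lo, int s)
{
    uint64_t hi  = lo << s;
    uint64_t v00 = *x & lo;         /* x bits that stay in place (v00) */
    uint64_t v10 = (*y & lo) << s;  /* y bits moving into x      (v10) */
    uint64_t v01 = (*x & hi) >> s;  /* x bits moving into y      (v01) */
    uint64_t v11 = *y & hi;         /* y bits that stay in place (v11) */
    *x = v00 | v10;
    *y = v01 | v11;
}

int main(void)
{
    uint64_t x[8] = {1, 2, 4, 8, 16, 32, 64, 128};

    /* Same pairing order as the x0..x7 registers above:
     * stride 4, then stride 2, then adjacent pairs. */
    for (int i = 0; i < 4; i++)
        butterfly(&x[i], &x[i + 4], 0x0F0F0F0F0F0F0F0FULL, 4);
    for (int i = 0; i < 8; i++)
        if ((i & 2) == 0)
            butterfly(&x[i], &x[i + 2], 0x3333333333333333ULL, 2);
    for (int i = 0; i < 8; i += 2)
        butterfly(&x[i], &x[i + 1], 0x5555555555555555ULL, 1);

    for (int i = 0; i < 8; i++)
        printf("x%d = %016llx\n", i, (unsigned long long)x[i]);
    return 0;
}

Three passes at strides 4, 2, and 1 move every bit to its transposed plane, which is exactly how the vpand/vpsllq/vpsrlq/vpor groups combine the x0..x7 register pairs in the assembly.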
mktmansour/MKT-KSA-Geolocation-Security
76,827
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece6688128f/avx2/vec256_maa_asm.S
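The record below is another qhasm-generated AVX2 kernel, vec256_maa_asm.S ("multiply-and-accumulate"). Its visible body is a bitsliced GF(2^13) product: each of the thirteen 256-bit planes of operand a (offsets 0..384 from input_1) is ANDed with the thirteen planes of operand b (offsets 0..384 from input_2), XOR-accumulating partial products r0..r24, and the high partials are folded back through the field polynomial x^13 + x^4 + x^3 + x + 1; that is what the recurring "r15 ^= r24; r14 ^= r24; r12 ^= r24; r11 = r24" pattern implements. A scalar C sketch of the same arithmetic follows; `vec_maa` is a hypothetical model on uint64_t planes, and the final XOR into `out` is an assumption drawn from the "maa" name and the usual vec256 conventions, since the kernel's tail falls outside this excerpt.

#include <stdint.h>

#define GFBITS 13  /* m = 13 for mceliece6688128f */

/* Bitsliced multiply-accumulate over GF(2^13): out ^= a * b.
 * Each lane holds one bit-plane of 64 field elements (the assembly
 * uses 256-bit ymm planes; uint64_t keeps the sketch portable).
 * AND multiplies corresponding bits, XOR adds, exactly as the
 * vpand/vpxor pairs in the file. */
static void vec_maa(uint64_t out[GFBITS],
                    const uint64_t a[GFBITS],
                    const uint64_t b[GFBITS])
{
    uint64_t buf[2 * GFBITS - 1] = {0};

    /* Schoolbook product in GF(2)[x]: buf[i+j] ^= a[i] & b[j]. */
    for (int i = 0; i < GFBITS; i++)
        for (int j = 0; j < GFBITS; j++)
            buf[i + j] ^= a[i] & b[j];

    /* Reduce modulo g(x) = x^13 + x^4 + x^3 + x + 1: each high
     * partial folds into four lower ones, matching the r24..r13
     * folding chains in the assembly. */
    for (int i = 2 * GFBITS - 2; i >= GFBITS; i--) {
        buf[i - GFBITS + 4] ^= buf[i];
        buf[i - GFBITS + 3] ^= buf[i];
        buf[i - GFBITS + 1] ^= buf[i];
        buf[i - GFBITS]     ^= buf[i];
    }

    for (int i = 0; i < GFBITS; i++)
        out[i] ^= buf[i];
}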
#include "namespace.h"

#define vec256_maa_asm CRYPTO_NAMESPACE(vec256_maa_asm)
#define _vec256_maa_asm _CRYPTO_NAMESPACE(vec256_maa_asm)

# qhasm: int64 input_0

# qhasm: int64 input_1

# qhasm: int64 input_2

# qhasm: int64 input_3

# qhasm: int64 input_4

# qhasm: int64 input_5

# qhasm: stack64 input_6

# qhasm: stack64 input_7

# qhasm: int64 caller_r11

# qhasm: int64 caller_r12

# qhasm: int64 caller_r13

# qhasm: int64 caller_r14

# qhasm: int64 caller_r15

# qhasm: int64 caller_rbx

# qhasm: int64 caller_rbp

# qhasm: reg256 a0

# qhasm: reg256 a1

# qhasm: reg256 a2

# qhasm: reg256 a3

# qhasm: reg256 a4

# qhasm: reg256 a5

# qhasm: reg256 a6

# qhasm: reg256 a7

# qhasm: reg256 a8

# qhasm: reg256 a9

# qhasm: reg256 a10

# qhasm: reg256 a11

# qhasm: reg256 a12

# qhasm: reg256 b0

# qhasm: reg256 b1

# qhasm: reg256 r0

# qhasm: reg256 r1

# qhasm: reg256 r2

# qhasm: reg256 r3

# qhasm: reg256 r4

# qhasm: reg256 r5

# qhasm: reg256 r6

# qhasm: reg256 r7

# qhasm: reg256 r8

# qhasm: reg256 r9

# qhasm: reg256 r10

# qhasm: reg256 r11

# qhasm: reg256 r12

# qhasm: reg256 r13

# qhasm: reg256 r14

# qhasm: reg256 r15

# qhasm: reg256 r16

# qhasm: reg256 r17

# qhasm: reg256 r18

# qhasm: reg256 r19

# qhasm: reg256 r20

# qhasm: reg256 r21

# qhasm: reg256 r22

# qhasm: reg256 r23

# qhasm: reg256 r24

# qhasm: reg256 r

# qhasm: enter vec256_maa_asm
.p2align 5
.global _vec256_maa_asm
.global vec256_maa_asm
_vec256_maa_asm:
vec256_maa_asm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp

# qhasm: b0 = mem256[ input_2 + 0 ]
# asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1
# asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0
vmovupd 0(%rdx),%ymm0

# qhasm: a12 = mem256[ input_1 + 384 ]
# asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2
# asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1
vmovupd 384(%rsi),%ymm1

# qhasm: r12 = a12 & b0
# asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3
# asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2
vpand %ymm1,%ymm0,%ymm2

# qhasm: r13 = a12 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4
# asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3
vpand 32(%rdx),%ymm1,%ymm3

# qhasm: r14 = a12 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5
# asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4
vpand 64(%rdx),%ymm1,%ymm4

# qhasm: r15 = a12 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6
# asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5
vpand 96(%rdx),%ymm1,%ymm5

# qhasm: r16 = a12 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7
# asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6
vpand 128(%rdx),%ymm1,%ymm6

# qhasm: r17 = a12 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8
# asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7
vpand 160(%rdx),%ymm1,%ymm7

# qhasm: r18 = a12 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9
# asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8
vpand 192(%rdx),%ymm1,%ymm8

# qhasm: r19 = a12 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10
# asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9
vpand 224(%rdx),%ymm1,%ymm9

# qhasm: r20 = a12 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11
# asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10
vpand 256(%rdx),%ymm1,%ymm10

# qhasm: r21
= a12 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12 # asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11 vpand 288( % rdx), % ymm1, % ymm11 # qhasm: r22 = a12 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13 # asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12 vpand 320( % rdx), % ymm1, % ymm12 # qhasm: r23 = a12 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14 # asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13 vpand 352( % rdx), % ymm1, % ymm13 # qhasm: r24 = a12 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2 # asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1 vpand 384( % rdx), % ymm1, % ymm1 # qhasm: r15 ^= r24 # asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5 vpxor % ymm1, % ymm5, % ymm5 # qhasm: r14 ^= r24 # asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4 vpxor % ymm1, % ymm4, % ymm4 # qhasm: r12 ^= r24 # asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2 vpxor % ymm1, % ymm2, % ymm2 # qhasm: r11 = r24 # asm 1: vmovapd <r24=reg256#2,>r11=reg256#2 # asm 2: vmovapd <r24=%ymm1,>r11=%ymm1 vmovapd % ymm1, % ymm1 # qhasm: a11 = mem256[ input_1 + 352 ] # asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15 # asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14 vmovupd 352( % rsi), % ymm14 # qhasm: r = a11 & b0 # asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a11 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a11 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a11 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a11 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a11 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor 
<r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a11 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a11 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a11 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a11 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a11 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a11 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a11 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r23 ^= r # asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14 # asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13 vpxor % ymm14, % ymm13, % ymm13 # qhasm: r14 ^= r23 # asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4 vpxor % ymm13, % ymm4, % ymm4 # qhasm: r13 ^= r23 # asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3 vpxor % ymm13, % ymm3, % ymm3 # qhasm: r11 ^= r23 # asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1 vpxor % ymm13, % ymm1, % ymm1 # qhasm: r10 = r23 # asm 1: vmovapd <r23=reg256#14,>r10=reg256#14 # asm 2: vmovapd <r23=%ymm13,>r10=%ymm13 vmovapd % ymm13, % ymm13 # qhasm: a10 = mem256[ input_1 + 320 ] # asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15 # asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14 vmovupd 320( % rsi), % ymm14 # qhasm: r = a10 & b0 # asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor 
<r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a10 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a10 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a10 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a10 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a10 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a10 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a10 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a10 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a10 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a10 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # 
qhasm: r = a10 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a10 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r22 ^= r # asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13 # asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12 vpxor % ymm14, % ymm12, % ymm12 # qhasm: r13 ^= r22 # asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3 vpxor % ymm12, % ymm3, % ymm3 # qhasm: r12 ^= r22 # asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2 vpxor % ymm12, % ymm2, % ymm2 # qhasm: r10 ^= r22 # asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13 vpxor % ymm12, % ymm13, % ymm13 # qhasm: r9 = r22 # asm 1: vmovapd <r22=reg256#13,>r9=reg256#13 # asm 2: vmovapd <r22=%ymm12,>r9=%ymm12 vmovapd % ymm12, % ymm12 # qhasm: a9 = mem256[ input_1 + 288 ] # asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15 # asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14 vmovupd 288( % rsi), % ymm14 # qhasm: r = a9 & b0 # asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a9 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a9 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a9 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a9 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a9 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a9 & mem256[input_2 + 192] # 
asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a9 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a9 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a9 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a9 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a9 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a9 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r21 ^= r # asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12 # asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11 vpxor % ymm14, % ymm11, % ymm11 # qhasm: r12 ^= r21 # asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2 vpxor % ymm11, % ymm2, % ymm2 # qhasm: r11 ^= r21 # asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1 vpxor % ymm11, % ymm1, % ymm1 # qhasm: r9 ^= r21 # asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12 vpxor % ymm11, % ymm12, % ymm12 # qhasm: r8 = r21 # asm 1: vmovapd <r21=reg256#12,>r8=reg256#12 # asm 2: vmovapd <r21=%ymm11,>r8=%ymm11 vmovapd % ymm11, % ymm11 # qhasm: a8 = mem256[ input_1 + 256 ] # asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15 # asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14 vmovupd 256( % rsi), % ymm14 # qhasm: r = a8 & b0 # asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a8 & mem256[input_2 + 32] # asm 1: vpand 
32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a8 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a8 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a8 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a8 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a8 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a8 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a8 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a8 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a8 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a8 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16 # asm 2: vpand 
352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a8 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r20 ^= r # asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11 # asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10 vpxor % ymm14, % ymm10, % ymm10 # qhasm: r11 ^= r20 # asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1 vpxor % ymm10, % ymm1, % ymm1 # qhasm: r10 ^= r20 # asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13 vpxor % ymm10, % ymm13, % ymm13 # qhasm: r8 ^= r20 # asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11 vpxor % ymm10, % ymm11, % ymm11 # qhasm: r7 = r20 # asm 1: vmovapd <r20=reg256#11,>r7=reg256#11 # asm 2: vmovapd <r20=%ymm10,>r7=%ymm10 vmovapd % ymm10, % ymm10 # qhasm: a7 = mem256[ input_1 + 224 ] # asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15 # asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14 vmovupd 224( % rsi), % ymm14 # qhasm: r = a7 & b0 # asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a7 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a7 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a7 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a7 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a7 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a7 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 
192( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a7 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a7 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a7 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a7 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a7 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a7 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r19 ^= r # asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10 # asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9 vpxor % ymm14, % ymm9, % ymm9 # qhasm: r10 ^= r19 # asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13 vpxor % ymm9, % ymm13, % ymm13 # qhasm: r9 ^= r19 # asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12 vpxor % ymm9, % ymm12, % ymm12 # qhasm: r7 ^= r19 # asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10 vpxor % ymm9, % ymm10, % ymm10 # qhasm: r6 = r19 # asm 1: vmovapd <r19=reg256#10,>r6=reg256#10 # asm 2: vmovapd <r19=%ymm9,>r6=%ymm9 vmovapd % ymm9, % ymm9 # qhasm: a6 = mem256[ input_1 + 192 ] # asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15 # asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14 vmovupd 192( % rsi), % ymm14 # qhasm: r = a6 & b0 # asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a6 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor 
<r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a6 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a6 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a6 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a6 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a6 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a6 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a6 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a6 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a6 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a6 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7 
vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a6 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r18 ^= r # asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9 # asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8 vpxor % ymm14, % ymm8, % ymm8 # qhasm: r9 ^= r18 # asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12 vpxor % ymm8, % ymm12, % ymm12 # qhasm: r8 ^= r18 # asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11 vpxor % ymm8, % ymm11, % ymm11 # qhasm: r6 ^= r18 # asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9 vpxor % ymm8, % ymm9, % ymm9 # qhasm: r5 = r18 # asm 1: vmovapd <r18=reg256#9,>r5=reg256#9 # asm 2: vmovapd <r18=%ymm8,>r5=%ymm8 vmovapd % ymm8, % ymm8 # qhasm: a5 = mem256[ input_1 + 160 ] # asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15 # asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14 vmovupd 160( % rsi), % ymm14 # qhasm: r = a5 & b0 # asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a5 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a5 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a5 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a5 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a5 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a5 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a5 & mem256[input_2 + 224] # asm 1: vpand 
224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a5 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a5 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a5 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a5 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r16 ^= r # asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a5 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r17 ^= r # asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8 # asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7 vpxor % ymm14, % ymm7, % ymm7 # qhasm: r8 ^= r17 # asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11 vpxor % ymm7, % ymm11, % ymm11 # qhasm: r7 ^= r17 # asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10 vpxor % ymm7, % ymm10, % ymm10 # qhasm: r5 ^= r17 # asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8 vpxor % ymm7, % ymm8, % ymm8 # qhasm: r4 = r17 # asm 1: vmovapd <r17=reg256#8,>r4=reg256#8 # asm 2: vmovapd <r17=%ymm7,>r4=%ymm7 vmovapd % ymm7, % ymm7 # qhasm: a4 = mem256[ input_1 + 128 ] # asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15 # asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14 vmovupd 128( % rsi), % ymm14 # qhasm: r = a4 & b0 # asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a4 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a4 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 
64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a4 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a4 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a4 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a4 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a4 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a4 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a4 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a4 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a4 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a4 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r16 ^= r 
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7 # asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6 vpxor % ymm14, % ymm6, % ymm6 # qhasm: r7 ^= r16 # asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10 vpxor % ymm6, % ymm10, % ymm10 # qhasm: r6 ^= r16 # asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9 vpxor % ymm6, % ymm9, % ymm9 # qhasm: r4 ^= r16 # asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7 vpxor % ymm6, % ymm7, % ymm7 # qhasm: r3 = r16 # asm 1: vmovapd <r16=reg256#7,>r3=reg256#7 # asm 2: vmovapd <r16=%ymm6,>r3=%ymm6 vmovapd % ymm6, % ymm6 # qhasm: a3 = mem256[ input_1 + 96 ] # asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15 # asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14 vmovupd 96( % rsi), % ymm14 # qhasm: r = a3 & b0 # asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a3 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a3 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a3 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a3 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a3 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a3 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a3 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % 
ymm15, % ymm13, % ymm13 # qhasm: r = a3 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a3 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a3 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a3 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a3 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r15 ^= r # asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6 # asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5 vpxor % ymm14, % ymm5, % ymm5 # qhasm: r6 ^= r15 # asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9 vpxor % ymm5, % ymm9, % ymm9 # qhasm: r5 ^= r15 # asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8 vpxor % ymm5, % ymm8, % ymm8 # qhasm: r3 ^= r15 # asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6 vpxor % ymm5, % ymm6, % ymm6 # qhasm: r2 = r15 # asm 1: vmovapd <r15=reg256#6,>r2=reg256#6 # asm 2: vmovapd <r15=%ymm5,>r2=%ymm5 vmovapd % ymm5, % ymm5 # qhasm: a2 = mem256[ input_1 + 64 ] # asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15 # asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14 vmovupd 64( % rsi), % ymm14 # qhasm: r = a2 & b0 # asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a2 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a2 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a2 & mem256[input_2 + 96] # asm 1: vpand 
96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a2 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a2 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a2 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a2 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a2 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a2 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a2 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a2 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 vpxor % ymm15, % ymm3, % ymm3 # qhasm: r = a2 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r14 ^= r # asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 # asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 vpxor % ymm14, % ymm4, % ymm4 # qhasm: r5 ^= r14 # asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 vpxor % ymm4, % ymm8, % ymm8 # 
qhasm: r4 ^= r14 # asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 vpxor % ymm4, % ymm7, % ymm7 # qhasm: r2 ^= r14 # asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 vpxor % ymm4, % ymm5, % ymm5 # qhasm: r1 = r14 # asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 # asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 vmovapd % ymm4, % ymm4 # qhasm: a1 = mem256[ input_1 + 32 ] # asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 # asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 vmovupd 32( % rsi), % ymm14 # qhasm: r = a1 & b0 # asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 # asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 vpand % ymm14, % ymm0, % ymm15 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 vpxor % ymm15, % ymm4, % ymm4 # qhasm: r = a1 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 32( % rdx), % ymm14, % ymm15 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 vpxor % ymm15, % ymm5, % ymm5 # qhasm: r = a1 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 64( % rdx), % ymm14, % ymm15 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 vpxor % ymm15, % ymm6, % ymm6 # qhasm: r = a1 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 96( % rdx), % ymm14, % ymm15 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 vpxor % ymm15, % ymm7, % ymm7 # qhasm: r = a1 & mem256[input_2 + 128] # asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 128( % rdx), % ymm14, % ymm15 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 vpxor % ymm15, % ymm8, % ymm8 # qhasm: r = a1 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 160( % rdx), % ymm14, % ymm15 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9 vpxor % ymm15, % ymm9, % ymm9 # qhasm: r = a1 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 192( % rdx), % ymm14, % ymm15 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10 vpxor % ymm15, % ymm10, % ymm10 # qhasm: r = a1 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 224( % rdx), % ymm14, % ymm15 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11 vpxor % ymm15, % ymm11, % ymm11 # qhasm: r = a1 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 256( % rdx), % ymm14, % ymm15 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor 
<r=%ymm15,<r9=%ymm12,<r9=%ymm12 vpxor % ymm15, % ymm12, % ymm12 # qhasm: r = a1 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 288( % rdx), % ymm14, % ymm15 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13 vpxor % ymm15, % ymm13, % ymm13 # qhasm: r = a1 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 320( % rdx), % ymm14, % ymm15 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1 vpxor % ymm15, % ymm1, % ymm1 # qhasm: r = a1 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 # asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 vpand 352( % rdx), % ymm14, % ymm15 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2 vpxor % ymm15, % ymm2, % ymm2 # qhasm: r = a1 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15 # asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14 vpand 384( % rdx), % ymm14, % ymm14 # qhasm: r13 ^= r # asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4 # asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3 vpxor % ymm14, % ymm3, % ymm3 # qhasm: r4 ^= r13 # asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7 vpxor % ymm3, % ymm7, % ymm7 # qhasm: r3 ^= r13 # asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6 vpxor % ymm3, % ymm6, % ymm6 # qhasm: r1 ^= r13 # asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4 vpxor % ymm3, % ymm4, % ymm4 # qhasm: r0 = r13 # asm 1: vmovapd <r13=reg256#4,>r0=reg256#4 # asm 2: vmovapd <r13=%ymm3,>r0=%ymm3 vmovapd % ymm3, % ymm3 # qhasm: a0 = mem256[ input_1 + 0 ] # asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15 # asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14 vmovupd 0( % rsi), % ymm14 # qhasm: r = a0 & b0 # asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1 # asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0 vpand % ymm14, % ymm0, % ymm0 # qhasm: r0 ^= r # asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4 # asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3 vpxor % ymm0, % ymm3, % ymm3 # qhasm: r = a0 & mem256[input_2 + 32] # asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 32( % rdx), % ymm14, % ymm0 # qhasm: r1 ^= r # asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5 # asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4 vpxor % ymm0, % ymm4, % ymm4 # qhasm: r = a0 & mem256[input_2 + 64] # asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 64( % rdx), % ymm14, % ymm0 # qhasm: r2 ^= r # asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6 # asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5 vpxor % ymm0, % ymm5, % ymm5 # qhasm: r = a0 & mem256[input_2 + 96] # asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 96( % rdx), % ymm14, % ymm0 # qhasm: r3 ^= r # asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7 # asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6 vpxor % ymm0, % ymm6, % ymm6 # qhasm: r = a0 & mem256[input_2 + 128] # asm 1: vpand 
128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 128( % rdx), % ymm14, % ymm0 # qhasm: r4 ^= r # asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8 # asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7 vpxor % ymm0, % ymm7, % ymm7 # qhasm: r = a0 & mem256[input_2 + 160] # asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 160( % rdx), % ymm14, % ymm0 # qhasm: r5 ^= r # asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9 # asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8 vpxor % ymm0, % ymm8, % ymm8 # qhasm: r = a0 & mem256[input_2 + 192] # asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 192( % rdx), % ymm14, % ymm0 # qhasm: r6 ^= r # asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10 # asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9 vpxor % ymm0, % ymm9, % ymm9 # qhasm: r = a0 & mem256[input_2 + 224] # asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 224( % rdx), % ymm14, % ymm0 # qhasm: r7 ^= r # asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11 # asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10 vpxor % ymm0, % ymm10, % ymm10 # qhasm: r = a0 & mem256[input_2 + 256] # asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 256( % rdx), % ymm14, % ymm0 # qhasm: r8 ^= r # asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12 # asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11 vpxor % ymm0, % ymm11, % ymm11 # qhasm: r = a0 & mem256[input_2 + 288] # asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 288( % rdx), % ymm14, % ymm0 # qhasm: r9 ^= r # asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13 # asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12 vpxor % ymm0, % ymm12, % ymm12 # qhasm: r = a0 & mem256[input_2 + 320] # asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 320( % rdx), % ymm14, % ymm0 # qhasm: r10 ^= r # asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14 # asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13 vpxor % ymm0, % ymm13, % ymm13 # qhasm: r = a0 & mem256[input_2 + 352] # asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 352( % rdx), % ymm14, % ymm0 # qhasm: r11 ^= r # asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2 # asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1 vpxor % ymm0, % ymm1, % ymm1 # qhasm: r = a0 & mem256[input_2 + 384] # asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1 # asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0 vpand 384( % rdx), % ymm14, % ymm0 # qhasm: r12 ^= r # asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3 # asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2 vpxor % ymm0, % ymm2, % ymm2 # qhasm: r12 = r12 ^ mem256[ input_0 + 384 ] # asm 1: vpxor 384(<input_0=int64#1),<r12=reg256#3,>r12=reg256#1 # asm 2: vpxor 384(<input_0=%rdi),<r12=%ymm2,>r12=%ymm0 vpxor 384( % rdi), % ymm2, % ymm0 # qhasm: mem256[ input_0 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_0=int64#1) # asm 2: vmovupd <r12=%ymm0,384(<input_0=%rdi) vmovupd % ymm0, 384( % rdi) # qhasm: r12 = r12 ^ mem256[ input_1 + 384 ] # asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#1,>r12=reg256#1 # asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm0,>r12=%ymm0 vpxor 384( % rsi), % 
ymm0, % ymm0 # qhasm: mem256[ input_1 + 384 ] = r12 # asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2) # asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi) vmovupd % ymm0, 384( % rsi) # qhasm: r11 = r11 ^ mem256[ input_0 + 352 ] # asm 1: vpxor 352(<input_0=int64#1),<r11=reg256#2,>r11=reg256#1 # asm 2: vpxor 352(<input_0=%rdi),<r11=%ymm1,>r11=%ymm0 vpxor 352( % rdi), % ymm1, % ymm0 # qhasm: mem256[ input_0 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_0=int64#1) # asm 2: vmovupd <r11=%ymm0,352(<input_0=%rdi) vmovupd % ymm0, 352( % rdi) # qhasm: r11 = r11 ^ mem256[ input_1 + 352 ] # asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#1,>r11=reg256#1 # asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm0,>r11=%ymm0 vpxor 352( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 352 ] = r11 # asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2) # asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi) vmovupd % ymm0, 352( % rsi) # qhasm: r10 = r10 ^ mem256[ input_0 + 320 ] # asm 1: vpxor 320(<input_0=int64#1),<r10=reg256#14,>r10=reg256#1 # asm 2: vpxor 320(<input_0=%rdi),<r10=%ymm13,>r10=%ymm0 vpxor 320( % rdi), % ymm13, % ymm0 # qhasm: mem256[ input_0 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_0=int64#1) # asm 2: vmovupd <r10=%ymm0,320(<input_0=%rdi) vmovupd % ymm0, 320( % rdi) # qhasm: r10 = r10 ^ mem256[ input_1 + 320 ] # asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#1,>r10=reg256#1 # asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm0,>r10=%ymm0 vpxor 320( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 320 ] = r10 # asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2) # asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi) vmovupd % ymm0, 320( % rsi) # qhasm: r9 = r9 ^ mem256[ input_0 + 288 ] # asm 1: vpxor 288(<input_0=int64#1),<r9=reg256#13,>r9=reg256#1 # asm 2: vpxor 288(<input_0=%rdi),<r9=%ymm12,>r9=%ymm0 vpxor 288( % rdi), % ymm12, % ymm0 # qhasm: mem256[ input_0 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_0=int64#1) # asm 2: vmovupd <r9=%ymm0,288(<input_0=%rdi) vmovupd % ymm0, 288( % rdi) # qhasm: r9 = r9 ^ mem256[ input_1 + 288 ] # asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#1,>r9=reg256#1 # asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm0,>r9=%ymm0 vpxor 288( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 288 ] = r9 # asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2) # asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi) vmovupd % ymm0, 288( % rsi) # qhasm: r8 = r8 ^ mem256[ input_0 + 256 ] # asm 1: vpxor 256(<input_0=int64#1),<r8=reg256#12,>r8=reg256#1 # asm 2: vpxor 256(<input_0=%rdi),<r8=%ymm11,>r8=%ymm0 vpxor 256( % rdi), % ymm11, % ymm0 # qhasm: mem256[ input_0 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_0=int64#1) # asm 2: vmovupd <r8=%ymm0,256(<input_0=%rdi) vmovupd % ymm0, 256( % rdi) # qhasm: r8 = r8 ^ mem256[ input_1 + 256 ] # asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#1,>r8=reg256#1 # asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm0,>r8=%ymm0 vpxor 256( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 256 ] = r8 # asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2) # asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi) vmovupd % ymm0, 256( % rsi) # qhasm: r7 = r7 ^ mem256[ input_0 + 224 ] # asm 1: vpxor 224(<input_0=int64#1),<r7=reg256#11,>r7=reg256#1 # asm 2: vpxor 224(<input_0=%rdi),<r7=%ymm10,>r7=%ymm0 vpxor 224( % rdi), % ymm10, % ymm0 # qhasm: mem256[ input_0 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_0=int64#1) # asm 2: vmovupd <r7=%ymm0,224(<input_0=%rdi) vmovupd % ymm0, 224( % rdi) # qhasm: r7 = r7 ^ mem256[ input_1 + 224 ] # asm 1: vpxor 
224(<input_1=int64#2),<r7=reg256#1,>r7=reg256#1 # asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm0,>r7=%ymm0 vpxor 224( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 224 ] = r7 # asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2) # asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi) vmovupd % ymm0, 224( % rsi) # qhasm: r6 = r6 ^ mem256[ input_0 + 192 ] # asm 1: vpxor 192(<input_0=int64#1),<r6=reg256#10,>r6=reg256#1 # asm 2: vpxor 192(<input_0=%rdi),<r6=%ymm9,>r6=%ymm0 vpxor 192( % rdi), % ymm9, % ymm0 # qhasm: mem256[ input_0 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_0=int64#1) # asm 2: vmovupd <r6=%ymm0,192(<input_0=%rdi) vmovupd % ymm0, 192( % rdi) # qhasm: r6 = r6 ^ mem256[ input_1 + 192 ] # asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#1,>r6=reg256#1 # asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm0,>r6=%ymm0 vpxor 192( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 192 ] = r6 # asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2) # asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi) vmovupd % ymm0, 192( % rsi) # qhasm: r5 = r5 ^ mem256[ input_0 + 160 ] # asm 1: vpxor 160(<input_0=int64#1),<r5=reg256#9,>r5=reg256#1 # asm 2: vpxor 160(<input_0=%rdi),<r5=%ymm8,>r5=%ymm0 vpxor 160( % rdi), % ymm8, % ymm0 # qhasm: mem256[ input_0 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_0=int64#1) # asm 2: vmovupd <r5=%ymm0,160(<input_0=%rdi) vmovupd % ymm0, 160( % rdi) # qhasm: r5 = r5 ^ mem256[ input_1 + 160 ] # asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#1,>r5=reg256#1 # asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm0,>r5=%ymm0 vpxor 160( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 160 ] = r5 # asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2) # asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi) vmovupd % ymm0, 160( % rsi) # qhasm: r4 = r4 ^ mem256[ input_0 + 128 ] # asm 1: vpxor 128(<input_0=int64#1),<r4=reg256#8,>r4=reg256#1 # asm 2: vpxor 128(<input_0=%rdi),<r4=%ymm7,>r4=%ymm0 vpxor 128( % rdi), % ymm7, % ymm0 # qhasm: mem256[ input_0 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_0=int64#1) # asm 2: vmovupd <r4=%ymm0,128(<input_0=%rdi) vmovupd % ymm0, 128( % rdi) # qhasm: r4 = r4 ^ mem256[ input_1 + 128 ] # asm 1: vpxor 128(<input_1=int64#2),<r4=reg256#1,>r4=reg256#1 # asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm0,>r4=%ymm0 vpxor 128( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 128 ] = r4 # asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2) # asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi) vmovupd % ymm0, 128( % rsi) # qhasm: r3 = r3 ^ mem256[ input_0 + 96 ] # asm 1: vpxor 96(<input_0=int64#1),<r3=reg256#7,>r3=reg256#1 # asm 2: vpxor 96(<input_0=%rdi),<r3=%ymm6,>r3=%ymm0 vpxor 96( % rdi), % ymm6, % ymm0 # qhasm: mem256[ input_0 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_0=int64#1) # asm 2: vmovupd <r3=%ymm0,96(<input_0=%rdi) vmovupd % ymm0, 96( % rdi) # qhasm: r3 = r3 ^ mem256[ input_1 + 96 ] # asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#1,>r3=reg256#1 # asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm0,>r3=%ymm0 vpxor 96( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 96 ] = r3 # asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2) # asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi) vmovupd % ymm0, 96( % rsi) # qhasm: r2 = r2 ^ mem256[ input_0 + 64 ] # asm 1: vpxor 64(<input_0=int64#1),<r2=reg256#6,>r2=reg256#1 # asm 2: vpxor 64(<input_0=%rdi),<r2=%ymm5,>r2=%ymm0 vpxor 64( % rdi), % ymm5, % ymm0 # qhasm: mem256[ input_0 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_0=int64#1) # asm 2: vmovupd <r2=%ymm0,64(<input_0=%rdi) vmovupd % ymm0, 64( % rdi) # qhasm: r2 = r2 ^ mem256[ input_1 + 64 ] # asm 1: 
vpxor 64(<input_1=int64#2),<r2=reg256#1,>r2=reg256#1 # asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm0,>r2=%ymm0 vpxor 64( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 64 ] = r2 # asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2) # asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi) vmovupd % ymm0, 64( % rsi) # qhasm: r1 = r1 ^ mem256[ input_0 + 32 ] # asm 1: vpxor 32(<input_0=int64#1),<r1=reg256#5,>r1=reg256#1 # asm 2: vpxor 32(<input_0=%rdi),<r1=%ymm4,>r1=%ymm0 vpxor 32( % rdi), % ymm4, % ymm0 # qhasm: mem256[ input_0 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_0=int64#1) # asm 2: vmovupd <r1=%ymm0,32(<input_0=%rdi) vmovupd % ymm0, 32( % rdi) # qhasm: r1 = r1 ^ mem256[ input_1 + 32 ] # asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#1,>r1=reg256#1 # asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm0,>r1=%ymm0 vpxor 32( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 32 ] = r1 # asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2) # asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi) vmovupd % ymm0, 32( % rsi) # qhasm: r0 = r0 ^ mem256[ input_0 + 0 ] # asm 1: vpxor 0(<input_0=int64#1),<r0=reg256#4,>r0=reg256#1 # asm 2: vpxor 0(<input_0=%rdi),<r0=%ymm3,>r0=%ymm0 vpxor 0( % rdi), % ymm3, % ymm0 # qhasm: mem256[ input_0 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_0=int64#1) # asm 2: vmovupd <r0=%ymm0,0(<input_0=%rdi) vmovupd % ymm0, 0( % rdi) # qhasm: r0 = r0 ^ mem256[ input_1 + 0 ] # asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#1,>r0=reg256#1 # asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm0,>r0=%ymm0 vpxor 0( % rsi), % ymm0, % ymm0 # qhasm: mem256[ input_1 + 0 ] = r0 # asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2) # asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi) vmovupd % ymm0, 0( % rsi) # qhasm: return add % r11, % rsp ret
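The qhasm routine above streams two buffers in 256-bit steps: each accumulator register is XORed into the first buffer and stored, then XORed into the second buffer and stored again. A scalar C sketch of that access pattern (the function name, the uint64_t lane type, and the explicit word count are illustrative assumptions, not from the source):

#include <stddef.h>
#include <stdint.h>

/* For each word: buf0 ^= r, store; then buf1 ^= updated buf0, store.
 * This mirrors the vpxor/vmovupd pairs above, 64 bits at a time
 * rather than 256. */
static void xor_fold(uint64_t *buf0, uint64_t *buf1,
                     const uint64_t *r, size_t words)
{
    for (size_t i = 0; i < words; i++) {
        uint64_t t = buf0[i] ^ r[i];
        buf0[i] = t;
        buf1[i] ^= t;
    }
}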
mktmansour/MKT-KSA-Geolocation-Security
2,719
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-768/avx2/basemul.S
#include "cdecl.h" .macro schoolbook off vmovdqa _16XQINV*2(%rcx),%ymm0 vmovdqa (64*\off+ 0)*2(%rsi),%ymm1 # a0 vmovdqa (64*\off+16)*2(%rsi),%ymm2 # b0 vmovdqa (64*\off+32)*2(%rsi),%ymm3 # a1 vmovdqa (64*\off+48)*2(%rsi),%ymm4 # b1 vpmullw %ymm0,%ymm1,%ymm9 # a0.lo vpmullw %ymm0,%ymm2,%ymm10 # b0.lo vpmullw %ymm0,%ymm3,%ymm11 # a1.lo vpmullw %ymm0,%ymm4,%ymm12 # b1.lo vmovdqa (64*\off+ 0)*2(%rdx),%ymm5 # c0 vmovdqa (64*\off+16)*2(%rdx),%ymm6 # d0 vpmulhw %ymm5,%ymm1,%ymm13 # a0c0.hi vpmulhw %ymm6,%ymm1,%ymm1 # a0d0.hi vpmulhw %ymm5,%ymm2,%ymm14 # b0c0.hi vpmulhw %ymm6,%ymm2,%ymm2 # b0d0.hi vmovdqa (64*\off+32)*2(%rdx),%ymm7 # c1 vmovdqa (64*\off+48)*2(%rdx),%ymm8 # d1 vpmulhw %ymm7,%ymm3,%ymm15 # a1c1.hi vpmulhw %ymm8,%ymm3,%ymm3 # a1d1.hi vpmulhw %ymm7,%ymm4,%ymm0 # b1c1.hi vpmulhw %ymm8,%ymm4,%ymm4 # b1d1.hi vmovdqa %ymm13,(%rsp) vpmullw %ymm5,%ymm9,%ymm13 # a0c0.lo vpmullw %ymm6,%ymm9,%ymm9 # a0d0.lo vpmullw %ymm5,%ymm10,%ymm5 # b0c0.lo vpmullw %ymm6,%ymm10,%ymm10 # b0d0.lo vpmullw %ymm7,%ymm11,%ymm6 # a1c1.lo vpmullw %ymm8,%ymm11,%ymm11 # a1d1.lo vpmullw %ymm7,%ymm12,%ymm7 # b1c1.lo vpmullw %ymm8,%ymm12,%ymm12 # b1d1.lo vmovdqa _16XQ*2(%rcx),%ymm8 vpmulhw %ymm8,%ymm13,%ymm13 vpmulhw %ymm8,%ymm9,%ymm9 vpmulhw %ymm8,%ymm5,%ymm5 vpmulhw %ymm8,%ymm10,%ymm10 vpmulhw %ymm8,%ymm6,%ymm6 vpmulhw %ymm8,%ymm11,%ymm11 vpmulhw %ymm8,%ymm7,%ymm7 vpmulhw %ymm8,%ymm12,%ymm12 vpsubw (%rsp),%ymm13,%ymm13 # -a0c0 vpsubw %ymm9,%ymm1,%ymm9 # a0d0 vpsubw %ymm5,%ymm14,%ymm5 # b0c0 vpsubw %ymm10,%ymm2,%ymm10 # b0d0 vpsubw %ymm6,%ymm15,%ymm6 # a1c1 vpsubw %ymm11,%ymm3,%ymm11 # a1d1 vpsubw %ymm7,%ymm0,%ymm7 # b1c1 vpsubw %ymm12,%ymm4,%ymm12 # b1d1 vmovdqa (%r9),%ymm0 vmovdqa 32(%r9),%ymm1 vpmullw %ymm0,%ymm10,%ymm2 vpmullw %ymm0,%ymm12,%ymm3 vpmulhw %ymm1,%ymm10,%ymm10 vpmulhw %ymm1,%ymm12,%ymm12 vpmulhw %ymm8,%ymm2,%ymm2 vpmulhw %ymm8,%ymm3,%ymm3 vpsubw %ymm2,%ymm10,%ymm10 # rb0d0 vpsubw %ymm3,%ymm12,%ymm12 # rb1d1 vpaddw %ymm5,%ymm9,%ymm9 vpaddw %ymm7,%ymm11,%ymm11 vpsubw %ymm13,%ymm10,%ymm13 vpsubw %ymm12,%ymm6,%ymm6 vmovdqa %ymm13,(64*\off+ 0)*2(%rdi) vmovdqa %ymm9,(64*\off+16)*2(%rdi) vmovdqa %ymm6,(64*\off+32)*2(%rdi) vmovdqa %ymm11,(64*\off+48)*2(%rdi) .endm .text .global cdecl(PQCLEAN_MLKEM768_AVX2_basemul_avx) .global _cdecl(PQCLEAN_MLKEM768_AVX2_basemul_avx) cdecl(PQCLEAN_MLKEM768_AVX2_basemul_avx): _cdecl(PQCLEAN_MLKEM768_AVX2_basemul_avx): mov %rsp,%r8 and $-32,%rsp sub $32,%rsp lea (_ZETAS_EXP+176)*2(%rcx),%r9 schoolbook 0 add $32*2,%r9 schoolbook 1 add $192*2,%r9 schoolbook 2 add $32*2,%r9 schoolbook 3 mov %r8,%rsp ret
mktmansour/MKT-KSA-Geolocation-Security
4,676
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-768/avx2/shuffle.S
#include "cdecl.h" .include "fq.inc" .include "shuffle.inc" /* nttpack_avx: #load vmovdqa (%rdi),%ymm4 vmovdqa 32(%rdi),%ymm5 vmovdqa 64(%rdi),%ymm6 vmovdqa 96(%rdi),%ymm7 vmovdqa 128(%rdi),%ymm8 vmovdqa 160(%rdi),%ymm9 vmovdqa 192(%rdi),%ymm10 vmovdqa 224(%rdi),%ymm11 shuffle1 4,5,3,5 shuffle1 6,7,4,7 shuffle1 8,9,6,9 shuffle1 10,11,8,11 shuffle2 3,4,10,4 shuffle2 6,8,3,8 shuffle2 5,7,6,7 shuffle2 9,11,5,11 shuffle4 10,3,9,3 shuffle4 6,5,10,5 shuffle4 4,8,6,8 shuffle4 7,11,4,11 shuffle8 9,10,7,10 shuffle8 6,4,9,4 shuffle8 3,5,6,5 shuffle8 8,11,3,11 #store vmovdqa %ymm7,(%rdi) vmovdqa %ymm9,32(%rdi) vmovdqa %ymm6,64(%rdi) vmovdqa %ymm3,96(%rdi) vmovdqa %ymm10,128(%rdi) vmovdqa %ymm4,160(%rdi) vmovdqa %ymm5,192(%rdi) vmovdqa %ymm11,224(%rdi) ret */ .text nttunpack128_avx: #load vmovdqa (%rdi),%ymm4 vmovdqa 32(%rdi),%ymm5 vmovdqa 64(%rdi),%ymm6 vmovdqa 96(%rdi),%ymm7 vmovdqa 128(%rdi),%ymm8 vmovdqa 160(%rdi),%ymm9 vmovdqa 192(%rdi),%ymm10 vmovdqa 224(%rdi),%ymm11 shuffle8 4,8,3,8 shuffle8 5,9,4,9 shuffle8 6,10,5,10 shuffle8 7,11,6,11 shuffle4 3,5,7,5 shuffle4 8,10,3,10 shuffle4 4,6,8,6 shuffle4 9,11,4,11 shuffle2 7,8,9,8 shuffle2 5,6,7,6 shuffle2 3,4,5,4 shuffle2 10,11,3,11 shuffle1 9,5,10,5 shuffle1 8,4,9,4 shuffle1 7,3,8,3 shuffle1 6,11,7,11 #store vmovdqa %ymm10,(%rdi) vmovdqa %ymm5,32(%rdi) vmovdqa %ymm9,64(%rdi) vmovdqa %ymm4,96(%rdi) vmovdqa %ymm8,128(%rdi) vmovdqa %ymm3,160(%rdi) vmovdqa %ymm7,192(%rdi) vmovdqa %ymm11,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM768_AVX2_nttunpack_avx) .global _cdecl(PQCLEAN_MLKEM768_AVX2_nttunpack_avx) cdecl(PQCLEAN_MLKEM768_AVX2_nttunpack_avx): _cdecl(PQCLEAN_MLKEM768_AVX2_nttunpack_avx): call nttunpack128_avx add $256,%rdi call nttunpack128_avx ret ntttobytes128_avx: #load vmovdqa (%rsi),%ymm5 vmovdqa 32(%rsi),%ymm6 vmovdqa 64(%rsi),%ymm7 vmovdqa 96(%rsi),%ymm8 vmovdqa 128(%rsi),%ymm9 vmovdqa 160(%rsi),%ymm10 vmovdqa 192(%rsi),%ymm11 vmovdqa 224(%rsi),%ymm12 #csubq csubq 5,13 csubq 6,13 csubq 7,13 csubq 8,13 csubq 9,13 csubq 10,13 csubq 11,13 csubq 12,13 #bitpack vpsllw $12,%ymm6,%ymm4 vpor %ymm4,%ymm5,%ymm4 vpsrlw $4,%ymm6,%ymm5 vpsllw $8,%ymm7,%ymm6 vpor %ymm5,%ymm6,%ymm5 vpsrlw $8,%ymm7,%ymm6 vpsllw $4,%ymm8,%ymm7 vpor %ymm6,%ymm7,%ymm6 vpsllw $12,%ymm10,%ymm7 vpor %ymm7,%ymm9,%ymm7 vpsrlw $4,%ymm10,%ymm8 vpsllw $8,%ymm11,%ymm9 vpor %ymm8,%ymm9,%ymm8 vpsrlw $8,%ymm11,%ymm9 vpsllw $4,%ymm12,%ymm10 vpor %ymm9,%ymm10,%ymm9 shuffle1 4,5,3,5 shuffle1 6,7,4,7 shuffle1 8,9,6,9 shuffle2 3,4,8,4 shuffle2 6,5,3,5 shuffle2 7,9,6,9 shuffle4 8,3,7,3 shuffle4 6,4,8,4 shuffle4 5,9,6,9 shuffle8 7,8,5,8 shuffle8 6,3,7,3 shuffle8 4,9,6,9 #store vmovdqu %ymm5,(%rdi) vmovdqu %ymm7,32(%rdi) vmovdqu %ymm6,64(%rdi) vmovdqu %ymm8,96(%rdi) vmovdqu %ymm3,128(%rdi) vmovdqu %ymm9,160(%rdi) ret .global cdecl(PQCLEAN_MLKEM768_AVX2_ntttobytes_avx) .global _cdecl(PQCLEAN_MLKEM768_AVX2_ntttobytes_avx) cdecl(PQCLEAN_MLKEM768_AVX2_ntttobytes_avx): _cdecl(PQCLEAN_MLKEM768_AVX2_ntttobytes_avx): #consts vmovdqa _16XQ*2(%rdx),%ymm0 call ntttobytes128_avx add $256,%rsi add $192,%rdi call ntttobytes128_avx ret nttfrombytes128_avx: #load vmovdqu (%rsi),%ymm4 vmovdqu 32(%rsi),%ymm5 vmovdqu 64(%rsi),%ymm6 vmovdqu 96(%rsi),%ymm7 vmovdqu 128(%rsi),%ymm8 vmovdqu 160(%rsi),%ymm9 shuffle8 4,7,3,7 shuffle8 5,8,4,8 shuffle8 6,9,5,9 shuffle4 3,8,6,8 shuffle4 7,5,3,5 shuffle4 4,9,7,9 shuffle2 6,5,4,5 shuffle2 8,7,6,7 shuffle2 3,9,8,9 shuffle1 4,7,10,7 shuffle1 5,8,4,8 shuffle1 6,9,5,9 #bitunpack vpsrlw $12,%ymm10,%ymm11 vpsllw $4,%ymm7,%ymm12 vpor %ymm11,%ymm12,%ymm11 vpand %ymm0,%ymm10,%ymm10 vpand 
%ymm0,%ymm11,%ymm11 vpsrlw $8,%ymm7,%ymm12 vpsllw $8,%ymm4,%ymm13 vpor %ymm12,%ymm13,%ymm12 vpand %ymm0,%ymm12,%ymm12 vpsrlw $4,%ymm4,%ymm13 vpand %ymm0,%ymm13,%ymm13 vpsrlw $12,%ymm8,%ymm14 vpsllw $4,%ymm5,%ymm15 vpor %ymm14,%ymm15,%ymm14 vpand %ymm0,%ymm8,%ymm8 vpand %ymm0,%ymm14,%ymm14 vpsrlw $8,%ymm5,%ymm15 vpsllw $8,%ymm9,%ymm1 vpor %ymm15,%ymm1,%ymm15 vpand %ymm0,%ymm15,%ymm15 vpsrlw $4,%ymm9,%ymm1 vpand %ymm0,%ymm1,%ymm1 #store vmovdqa %ymm10,(%rdi) vmovdqa %ymm11,32(%rdi) vmovdqa %ymm12,64(%rdi) vmovdqa %ymm13,96(%rdi) vmovdqa %ymm8,128(%rdi) vmovdqa %ymm14,160(%rdi) vmovdqa %ymm15,192(%rdi) vmovdqa %ymm1,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM768_AVX2_nttfrombytes_avx) .global _cdecl(PQCLEAN_MLKEM768_AVX2_nttfrombytes_avx) cdecl(PQCLEAN_MLKEM768_AVX2_nttfrombytes_avx): _cdecl(PQCLEAN_MLKEM768_AVX2_nttfrombytes_avx): #consts vmovdqa _16XMASK*2(%rdx),%ymm0 call nttfrombytes128_avx add $256,%rdi add $192,%rsi call nttfrombytes128_avx ret
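ntttobytes128_avx and nttfrombytes128_avx above interleave the lane transposes with 12-bit coefficient packing, two coefficients per three bytes. The packing arithmetic by itself in scalar C (buffer and function names are illustrative; the AVX2 code performs the same shifts and ORs across 16 lanes with vpsllw/vpsrlw/vpor):

#include <stdint.h>

/* Pack two 12-bit coefficients t0, t1 into 3 bytes. */
static void pack2(uint8_t r[3], uint16_t t0, uint16_t t1)
{
    r[0] = (uint8_t)t0;
    r[1] = (uint8_t)((t0 >> 8) | (t1 << 4));
    r[2] = (uint8_t)(t1 >> 4);
}

/* Inverse: recover both coefficients, masking to 12 bits
 * (the role of the _16XMASK constant in nttfrombytes). */
static void unpack2(uint16_t t[2], const uint8_t r[3])
{
    t[0] = (uint16_t)((r[0] | ((uint16_t)r[1] << 8)) & 0xFFF);
    t[1] = (uint16_t)(((r[1] >> 4) | ((uint16_t)r[2] << 4)) & 0xFFF);
}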
mktmansour/MKT-KSA-Geolocation-Security
1,797
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-768/avx2/fq.S
#include "cdecl.h" .include "fq.inc" .text reduce128_avx: #load vmovdqa (%rdi),%ymm2 vmovdqa 32(%rdi),%ymm3 vmovdqa 64(%rdi),%ymm4 vmovdqa 96(%rdi),%ymm5 vmovdqa 128(%rdi),%ymm6 vmovdqa 160(%rdi),%ymm7 vmovdqa 192(%rdi),%ymm8 vmovdqa 224(%rdi),%ymm9 red16 2 red16 3 red16 4 red16 5 red16 6 red16 7 red16 8 red16 9 #store vmovdqa %ymm2,(%rdi) vmovdqa %ymm3,32(%rdi) vmovdqa %ymm4,64(%rdi) vmovdqa %ymm5,96(%rdi) vmovdqa %ymm6,128(%rdi) vmovdqa %ymm7,160(%rdi) vmovdqa %ymm8,192(%rdi) vmovdqa %ymm9,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM768_AVX2_reduce_avx) .global _cdecl(PQCLEAN_MLKEM768_AVX2_reduce_avx) cdecl(PQCLEAN_MLKEM768_AVX2_reduce_avx): _cdecl(PQCLEAN_MLKEM768_AVX2_reduce_avx): #consts vmovdqa _16XQ*2(%rsi),%ymm0 vmovdqa _16XV*2(%rsi),%ymm1 call reduce128_avx add $256,%rdi call reduce128_avx ret tomont128_avx: #load vmovdqa (%rdi),%ymm3 vmovdqa 32(%rdi),%ymm4 vmovdqa 64(%rdi),%ymm5 vmovdqa 96(%rdi),%ymm6 vmovdqa 128(%rdi),%ymm7 vmovdqa 160(%rdi),%ymm8 vmovdqa 192(%rdi),%ymm9 vmovdqa 224(%rdi),%ymm10 fqmulprecomp 1,2,3,11 fqmulprecomp 1,2,4,12 fqmulprecomp 1,2,5,13 fqmulprecomp 1,2,6,14 fqmulprecomp 1,2,7,15 fqmulprecomp 1,2,8,11 fqmulprecomp 1,2,9,12 fqmulprecomp 1,2,10,13 #store vmovdqa %ymm3,(%rdi) vmovdqa %ymm4,32(%rdi) vmovdqa %ymm5,64(%rdi) vmovdqa %ymm6,96(%rdi) vmovdqa %ymm7,128(%rdi) vmovdqa %ymm8,160(%rdi) vmovdqa %ymm9,192(%rdi) vmovdqa %ymm10,224(%rdi) ret .global cdecl(PQCLEAN_MLKEM768_AVX2_tomont_avx) .global _cdecl(PQCLEAN_MLKEM768_AVX2_tomont_avx) cdecl(PQCLEAN_MLKEM768_AVX2_tomont_avx): _cdecl(PQCLEAN_MLKEM768_AVX2_tomont_avx): #consts vmovdqa _16XQ*2(%rsi),%ymm0 vmovdqa _16XMONTSQLO*2(%rsi),%ymm1 vmovdqa _16XMONTSQHI*2(%rsi),%ymm2 call tomont128_avx add $256,%rdi call tomont128_avx ret
mktmansour/MKT-KSA-Geolocation-Security
4,178
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-768/avx2/ntt.S
#include "cdecl.h" .include "shuffle.inc" .macro mul rh0,rh1,rh2,rh3,zl0=15,zl1=15,zh0=2,zh1=2 vpmullw %ymm\zl0,%ymm\rh0,%ymm12 vpmullw %ymm\zl0,%ymm\rh1,%ymm13 vpmullw %ymm\zl1,%ymm\rh2,%ymm14 vpmullw %ymm\zl1,%ymm\rh3,%ymm15 vpmulhw %ymm\zh0,%ymm\rh0,%ymm\rh0 vpmulhw %ymm\zh0,%ymm\rh1,%ymm\rh1 vpmulhw %ymm\zh1,%ymm\rh2,%ymm\rh2 vpmulhw %ymm\zh1,%ymm\rh3,%ymm\rh3 .endm .macro reduce vpmulhw %ymm0,%ymm12,%ymm12 vpmulhw %ymm0,%ymm13,%ymm13 vpmulhw %ymm0,%ymm14,%ymm14 vpmulhw %ymm0,%ymm15,%ymm15 .endm .macro update rln,rl0,rl1,rl2,rl3,rh0,rh1,rh2,rh3 vpaddw %ymm\rh0,%ymm\rl0,%ymm\rln vpsubw %ymm\rh0,%ymm\rl0,%ymm\rh0 vpaddw %ymm\rh1,%ymm\rl1,%ymm\rl0 vpsubw %ymm\rh1,%ymm\rl1,%ymm\rh1 vpaddw %ymm\rh2,%ymm\rl2,%ymm\rl1 vpsubw %ymm\rh2,%ymm\rl2,%ymm\rh2 vpaddw %ymm\rh3,%ymm\rl3,%ymm\rl2 vpsubw %ymm\rh3,%ymm\rl3,%ymm\rh3 vpsubw %ymm12,%ymm\rln,%ymm\rln vpaddw %ymm12,%ymm\rh0,%ymm\rh0 vpsubw %ymm13,%ymm\rl0,%ymm\rl0 vpaddw %ymm13,%ymm\rh1,%ymm\rh1 vpsubw %ymm14,%ymm\rl1,%ymm\rl1 vpaddw %ymm14,%ymm\rh2,%ymm\rh2 vpsubw %ymm15,%ymm\rl2,%ymm\rl2 vpaddw %ymm15,%ymm\rh3,%ymm\rh3 .endm .macro level0 off vpbroadcastq (_ZETAS_EXP+0)*2(%rsi),%ymm15 vmovdqa (64*\off+128)*2(%rdi),%ymm8 vmovdqa (64*\off+144)*2(%rdi),%ymm9 vmovdqa (64*\off+160)*2(%rdi),%ymm10 vmovdqa (64*\off+176)*2(%rdi),%ymm11 vpbroadcastq (_ZETAS_EXP+4)*2(%rsi),%ymm2 mul 8,9,10,11 vmovdqa (64*\off+ 0)*2(%rdi),%ymm4 vmovdqa (64*\off+ 16)*2(%rdi),%ymm5 vmovdqa (64*\off+ 32)*2(%rdi),%ymm6 vmovdqa (64*\off+ 48)*2(%rdi),%ymm7 reduce update 3,4,5,6,7,8,9,10,11 vmovdqa %ymm3,(64*\off+ 0)*2(%rdi) vmovdqa %ymm4,(64*\off+ 16)*2(%rdi) vmovdqa %ymm5,(64*\off+ 32)*2(%rdi) vmovdqa %ymm6,(64*\off+ 48)*2(%rdi) vmovdqa %ymm8,(64*\off+128)*2(%rdi) vmovdqa %ymm9,(64*\off+144)*2(%rdi) vmovdqa %ymm10,(64*\off+160)*2(%rdi) vmovdqa %ymm11,(64*\off+176)*2(%rdi) .endm .macro levels1t6 off /* level 1 */ vmovdqa (_ZETAS_EXP+224*\off+16)*2(%rsi),%ymm15 vmovdqa (128*\off+ 64)*2(%rdi),%ymm8 vmovdqa (128*\off+ 80)*2(%rdi),%ymm9 vmovdqa (128*\off+ 96)*2(%rdi),%ymm10 vmovdqa (128*\off+112)*2(%rdi),%ymm11 vmovdqa (_ZETAS_EXP+224*\off+32)*2(%rsi),%ymm2 mul 8,9,10,11 vmovdqa (128*\off+ 0)*2(%rdi),%ymm4 vmovdqa (128*\off+ 16)*2(%rdi),%ymm5 vmovdqa (128*\off+ 32)*2(%rdi),%ymm6 vmovdqa (128*\off+ 48)*2(%rdi),%ymm7 reduce update 3,4,5,6,7,8,9,10,11 /* level 2 */ shuffle8 5,10,7,10 shuffle8 6,11,5,11 vmovdqa (_ZETAS_EXP+224*\off+48)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+64)*2(%rsi),%ymm2 mul 7,10,5,11 shuffle8 3,8,6,8 shuffle8 4,9,3,9 reduce update 4,6,8,3,9,7,10,5,11 /* level 3 */ shuffle4 8,5,9,5 shuffle4 3,11,8,11 vmovdqa (_ZETAS_EXP+224*\off+80)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+96)*2(%rsi),%ymm2 mul 9,5,8,11 shuffle4 4,7,3,7 shuffle4 6,10,4,10 reduce update 6,3,7,4,10,9,5,8,11 /* level 4 */ shuffle2 7,8,10,8 shuffle2 4,11,7,11 vmovdqa (_ZETAS_EXP+224*\off+112)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+128)*2(%rsi),%ymm2 mul 10,8,7,11 shuffle2 6,9,4,9 shuffle2 3,5,6,5 reduce update 3,4,9,6,5,10,8,7,11 /* level 5 */ shuffle1 9,7,5,7 shuffle1 6,11,9,11 vmovdqa (_ZETAS_EXP+224*\off+144)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+160)*2(%rsi),%ymm2 mul 5,7,9,11 shuffle1 3,10,6,10 shuffle1 4,8,3,8 reduce update 4,6,10,3,8,5,7,9,11 /* level 6 */ vmovdqa (_ZETAS_EXP+224*\off+176)*2(%rsi),%ymm14 vmovdqa (_ZETAS_EXP+224*\off+208)*2(%rsi),%ymm15 vmovdqa (_ZETAS_EXP+224*\off+192)*2(%rsi),%ymm8 vmovdqa (_ZETAS_EXP+224*\off+224)*2(%rsi),%ymm2 mul 10,3,9,11,14,15,8,2 reduce update 8,4,6,5,7,10,3,9,11 vmovdqa %ymm8,(128*\off+ 0)*2(%rdi) vmovdqa %ymm4,(128*\off+ 16)*2(%rdi) 
vmovdqa %ymm10,(128*\off+ 32)*2(%rdi) vmovdqa %ymm3,(128*\off+ 48)*2(%rdi) vmovdqa %ymm6,(128*\off+ 64)*2(%rdi) vmovdqa %ymm5,(128*\off+ 80)*2(%rdi) vmovdqa %ymm9,(128*\off+ 96)*2(%rdi) vmovdqa %ymm11,(128*\off+112)*2(%rdi) .endm .text .global cdecl(PQCLEAN_MLKEM768_AVX2_ntt_avx) .global _cdecl(PQCLEAN_MLKEM768_AVX2_ntt_avx) cdecl(PQCLEAN_MLKEM768_AVX2_ntt_avx): _cdecl(PQCLEAN_MLKEM768_AVX2_ntt_avx): vmovdqa _16XQ*2(%rsi),%ymm0 level0 0 level0 1 levels1t6 0 levels1t6 1 ret
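The `mul`, `reduce`, and `update` macros above jointly realize batches of Cooley-Tukey butterflies: `mul` forms the low/high halves of the twiddle products (the `zl` registers hold zeta*q^-1 mod 2^16, the `zh` registers the plain zetas), `reduce` finishes the Montgomery step, and `update` applies the add/subtract pairs. The net effect of one butterfly in scalar C (a sketch; `fqmul` as in the basemul snippet):

#include <stdint.h>

#define KYBER_Q 3329
#define QINV   -3327

/* Montgomery product a*b*2^-16 mod q. */
static int16_t fqmul(int16_t a, int16_t b)
{
    int32_t p = (int32_t)a * b;
    int16_t m = (int16_t)p * QINV;
    return (int16_t)((p - (int32_t)m * KYBER_Q) >> 16);
}

/* Forward (Cooley-Tukey) butterfly. */
static void ct_butterfly(int16_t *lo, int16_t *hi, int16_t zeta)
{
    int16_t t = fqmul(zeta, *hi);
    *hi = (int16_t)(*lo - t);
    *lo = (int16_t)(*lo + t);
}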
mktmansour/MKT-KSA-Geolocation-Security
4,787
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-768/avx2/invntt.S
#include "cdecl.h" .include "shuffle.inc" .include "fq.inc" .macro butterfly rl0,rl1,rl2,rl3,rh0,rh1,rh2,rh3,zl0=2,zl1=2,zh0=3,zh1=3 vpsubw %ymm\rl0,%ymm\rh0,%ymm12 vpaddw %ymm\rh0,%ymm\rl0,%ymm\rl0 vpsubw %ymm\rl1,%ymm\rh1,%ymm13 vpmullw %ymm\zl0,%ymm12,%ymm\rh0 vpaddw %ymm\rh1,%ymm\rl1,%ymm\rl1 vpsubw %ymm\rl2,%ymm\rh2,%ymm14 vpmullw %ymm\zl0,%ymm13,%ymm\rh1 vpaddw %ymm\rh2,%ymm\rl2,%ymm\rl2 vpsubw %ymm\rl3,%ymm\rh3,%ymm15 vpmullw %ymm\zl1,%ymm14,%ymm\rh2 vpaddw %ymm\rh3,%ymm\rl3,%ymm\rl3 vpmullw %ymm\zl1,%ymm15,%ymm\rh3 vpmulhw %ymm\zh0,%ymm12,%ymm12 vpmulhw %ymm\zh0,%ymm13,%ymm13 vpmulhw %ymm\zh1,%ymm14,%ymm14 vpmulhw %ymm\zh1,%ymm15,%ymm15 vpmulhw %ymm0,%ymm\rh0,%ymm\rh0 vpmulhw %ymm0,%ymm\rh1,%ymm\rh1 vpmulhw %ymm0,%ymm\rh2,%ymm\rh2 vpmulhw %ymm0,%ymm\rh3,%ymm\rh3 # # vpsubw %ymm\rh0,%ymm12,%ymm\rh0 vpsubw %ymm\rh1,%ymm13,%ymm\rh1 vpsubw %ymm\rh2,%ymm14,%ymm\rh2 vpsubw %ymm\rh3,%ymm15,%ymm\rh3 .endm .macro intt_levels0t5 off /* level 0 */ vmovdqa _16XFLO*2(%rsi),%ymm2 vmovdqa _16XFHI*2(%rsi),%ymm3 vmovdqa (128*\off+ 0)*2(%rdi),%ymm4 vmovdqa (128*\off+ 32)*2(%rdi),%ymm6 vmovdqa (128*\off+ 16)*2(%rdi),%ymm5 vmovdqa (128*\off+ 48)*2(%rdi),%ymm7 fqmulprecomp 2,3,4 fqmulprecomp 2,3,6 fqmulprecomp 2,3,5 fqmulprecomp 2,3,7 vmovdqa (128*\off+ 64)*2(%rdi),%ymm8 vmovdqa (128*\off+ 96)*2(%rdi),%ymm10 vmovdqa (128*\off+ 80)*2(%rdi),%ymm9 vmovdqa (128*\off+112)*2(%rdi),%ymm11 fqmulprecomp 2,3,8 fqmulprecomp 2,3,10 fqmulprecomp 2,3,9 fqmulprecomp 2,3,11 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+208)*2(%rsi),%ymm15 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+176)*2(%rsi),%ymm1 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+224)*2(%rsi),%ymm2 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+192)*2(%rsi),%ymm3 vmovdqa _REVIDXB*2(%rsi),%ymm12 vpshufb %ymm12,%ymm15,%ymm15 vpshufb %ymm12,%ymm1,%ymm1 vpshufb %ymm12,%ymm2,%ymm2 vpshufb %ymm12,%ymm3,%ymm3 butterfly 4,5,8,9,6,7,10,11,15,1,2,3 /* level 1 */ vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+144)*2(%rsi),%ymm2 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+160)*2(%rsi),%ymm3 vmovdqa _REVIDXB*2(%rsi),%ymm1 vpshufb %ymm1,%ymm2,%ymm2 vpshufb %ymm1,%ymm3,%ymm3 butterfly 4,5,6,7,8,9,10,11,2,2,3,3 shuffle1 4,5,3,5 shuffle1 6,7,4,7 shuffle1 8,9,6,9 shuffle1 10,11,8,11 /* level 2 */ vmovdqa _REVIDXD*2(%rsi),%ymm12 vpermd (_ZETAS_EXP+(1-\off)*224+112)*2(%rsi),%ymm12,%ymm2 vpermd (_ZETAS_EXP+(1-\off)*224+128)*2(%rsi),%ymm12,%ymm10 butterfly 3,4,6,8,5,7,9,11,2,2,10,10 vmovdqa _16XV*2(%rsi),%ymm1 red16 3 shuffle2 3,4,10,4 shuffle2 6,8,3,8 shuffle2 5,7,6,7 shuffle2 9,11,5,11 /* level 3 */ vpermq $0x1B,(_ZETAS_EXP+(1-\off)*224+80)*2(%rsi),%ymm2 vpermq $0x1B,(_ZETAS_EXP+(1-\off)*224+96)*2(%rsi),%ymm9 butterfly 10,3,6,5,4,8,7,11,2,2,9,9 shuffle4 10,3,9,3 shuffle4 6,5,10,5 shuffle4 4,8,6,8 shuffle4 7,11,4,11 /* level 4 */ vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+48)*2(%rsi),%ymm2 vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+64)*2(%rsi),%ymm7 butterfly 9,10,6,4,3,5,8,11,2,2,7,7 red16 9 shuffle8 9,10,7,10 shuffle8 6,4,9,4 shuffle8 3,5,6,5 shuffle8 8,11,3,11 /* level 5 */ vmovdqa (_ZETAS_EXP+(1-\off)*224+16)*2(%rsi),%ymm2 vmovdqa (_ZETAS_EXP+(1-\off)*224+32)*2(%rsi),%ymm8 butterfly 7,9,6,3,10,4,5,11,2,2,8,8 vmovdqa %ymm7,(128*\off+ 0)*2(%rdi) vmovdqa %ymm9,(128*\off+ 16)*2(%rdi) vmovdqa %ymm6,(128*\off+ 32)*2(%rdi) vmovdqa %ymm3,(128*\off+ 48)*2(%rdi) vmovdqa %ymm10,(128*\off+ 64)*2(%rdi) vmovdqa %ymm4,(128*\off+ 80)*2(%rdi) vmovdqa %ymm5,(128*\off+ 96)*2(%rdi) vmovdqa %ymm11,(128*\off+112)*2(%rdi) .endm .macro intt_level6 off /* level 6 */ vmovdqa (64*\off+ 0)*2(%rdi),%ymm4 vmovdqa (64*\off+128)*2(%rdi),%ymm8 vmovdqa 
(64*\off+ 16)*2(%rdi),%ymm5 vmovdqa (64*\off+144)*2(%rdi),%ymm9 vpbroadcastq (_ZETAS_EXP+0)*2(%rsi),%ymm2 vmovdqa (64*\off+ 32)*2(%rdi),%ymm6 vmovdqa (64*\off+160)*2(%rdi),%ymm10 vmovdqa (64*\off+ 48)*2(%rdi),%ymm7 vmovdqa (64*\off+176)*2(%rdi),%ymm11 vpbroadcastq (_ZETAS_EXP+4)*2(%rsi),%ymm3 butterfly 4,5,6,7,8,9,10,11 .if \off == 0 red16 4 .endif vmovdqa %ymm4,(64*\off+ 0)*2(%rdi) vmovdqa %ymm5,(64*\off+ 16)*2(%rdi) vmovdqa %ymm6,(64*\off+ 32)*2(%rdi) vmovdqa %ymm7,(64*\off+ 48)*2(%rdi) vmovdqa %ymm8,(64*\off+128)*2(%rdi) vmovdqa %ymm9,(64*\off+144)*2(%rdi) vmovdqa %ymm10,(64*\off+160)*2(%rdi) vmovdqa %ymm11,(64*\off+176)*2(%rdi) .endm .text .global cdecl(PQCLEAN_MLKEM768_AVX2_invntt_avx) .global _cdecl(PQCLEAN_MLKEM768_AVX2_invntt_avx) cdecl(PQCLEAN_MLKEM768_AVX2_invntt_avx): _cdecl(PQCLEAN_MLKEM768_AVX2_invntt_avx): vmovdqa _16XQ*2(%rsi),%ymm0 intt_levels0t5 0 intt_levels0t5 1 intt_level6 0 intt_level6 1 ret
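`butterfly` here is the Gentleman-Sande (inverse) variant: sum and difference come first, then only the difference is multiplied by the twiddle; the `_16XFLO`/`_16XFHI` products at level 0 fold in the n^-1 and Montgomery scaling, and the interleaved `red16` calls keep the growing sums in range. Net effect of one butterfly in scalar C (a sketch):

#include <stdint.h>

#define KYBER_Q 3329
#define QINV   -3327

/* Montgomery product a*b*2^-16 mod q. */
static int16_t fqmul(int16_t a, int16_t b)
{
    int32_t p = (int32_t)a * b;
    int16_t m = (int16_t)p * QINV;
    return (int16_t)((p - (int32_t)m * KYBER_Q) >> 16);
}

/* Inverse (Gentleman-Sande) butterfly. */
static void gs_butterfly(int16_t *lo, int16_t *hi, int16_t zeta)
{
    int16_t t = *lo;
    *lo = (int16_t)(t + *hi);                 /* re-centered later by red16 */
    *hi = fqmul(zeta, (int16_t)(*hi - t));
}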
mktmansour/MKT-KSA-Geolocation-Security
12,888
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-768/aarch64/__asm_iNTT.S
/* * We offer * CC0 1.0 Universal or the following MIT License for this file. * You may freely choose one of them that applies. * * MIT License * * Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang * Copyright (c) 2023: Vincent Hwang * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "macros.inc" .align 2 .global PQCLEAN_MLKEM768_AARCH64__asm_intt_SIMD_bot .global _PQCLEAN_MLKEM768_AARCH64__asm_intt_SIMD_bot PQCLEAN_MLKEM768_AARCH64__asm_intt_SIMD_bot: _PQCLEAN_MLKEM768_AARCH64__asm_intt_SIMD_bot: push_all Q .req w20 BarrettM .req w21 src0 .req x0 src1 .req x1 table .req x28 counter .req x19 ldrsh Q, [x2, #0] ldrsh BarrettM, [x2, #8] add table, x1, #64 add src0, x0, #256*0 add src1, x0, #256*1 mov v0.H[0], Q mov v0.H[1], BarrettM ldr q28, [src0, #1*16] ldr q29, [src1, #1*16] ldr q30, [src0, #3*16] ldr q31, [src1, #3*16] trn_4x4_2l4 v28, v29, v30, v31, v20, v21, v22, v23, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16 trn_4x4_2l4 v24, v25, v26, v27, v20, v21, v22, v23, table, table, q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16 do_butterfly_vec_bot v28, v30, v18, v19, v29, v31, v0, v12, v13, v14, v15 do_butterfly_vec_mix_rev_l4 \ v18, v19, v29, v31, \ v24, v26, v16, v17, v25, v27, v0, v12, v13, v14, v15, \ table, \ q8, q9, q10, q11, \ #8*16, #9*16, #10*16, #11*16 do_butterfly_vec_mix_rev_l4 \ v16, v17, v25, v27, \ v28, v29, v18, v19, v30, v31, v0, v8, v9, v10, v11, \ table, \ q4, q5, q6, q7, \ #4*16, #5*16, #6*16, #7*16 do_butterfly_vec_mix_rev_l3 \ v18, v19, v30, v31, \ v24, v25, v16, v17, v26, v27, v0, v6, v7, v6, v7, \ table, \ q1, q2, q3, \ #1*16, #2*16, #3*16 do_butterfly_vec_mix_rev v24, v25, v16, v17, v26, v27, v24, v25, v18, v19, v28, v29, v0, v4, v5, v4, v5, v2, v3, v2, v3 do_butterfly_vec_mix_rev v24, v25, v18, v19, v28, v29, v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3, v2, v3, v2, v3 do_butterfly_vec_top v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3 oo_barrett v24, v25, v26, v27, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, #11, v0 add table, table, #256 trn_4x4 v28, v29, v30, v31, v16, v17, v18, v19 trn_4x4_2s4 v24, v25, v26, v27, v16, v17, v18, v19, src0, src1, q28, q29, q30, q31, #1*16, #1*16, #3*16, #3*16 mov counter, #3 _intt_bot_loop: str q24, [src0, #0*16] ldr q28, [src0, #(64+1*16)] str q25, [src1, #0*16] ldr q29, [src1, #(64+1*16)] str q26, [src0, #2*16] ldr q30, [src0, #(64+3*16)] str q27, [src1, #2*16] ldr q31, [src1, #(64+3*16)] add src0, src0, #64 add src1, src1, #64 
trn_4x4_2l4 v28, v29, v30, v31, v20, v21, v22, v23, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16 trn_4x4_2l4 v24, v25, v26, v27, v20, v21, v22, v23, table, table, q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16 do_butterfly_vec_bot v28, v30, v18, v19, v29, v31, v0, v12, v13, v14, v15 do_butterfly_vec_mix_rev_l4 \ v18, v19, v29, v31, \ v24, v26, v16, v17, v25, v27, v0, v12, v13, v14, v15, \ table, \ q8, q9, q10, q11, \ #8*16, #9*16, #10*16, #11*16 do_butterfly_vec_mix_rev_l4 \ v16, v17, v25, v27, \ v28, v29, v18, v19, v30, v31, v0, v8, v9, v10, v11, \ table, \ q4, q5, q6, q7, \ #4*16, #5*16, #6*16, #7*16 do_butterfly_vec_mix_rev_l3 \ v18, v19, v30, v31, \ v24, v25, v16, v17, v26, v27, v0, v6, v7, v6, v7, \ table, \ q1, q2, q3, \ #1*16, #2*16, #3*16 do_butterfly_vec_mix_rev v24, v25, v16, v17, v26, v27, v24, v25, v18, v19, v28, v29, v0, v4, v5, v4, v5, v2, v3, v2, v3 do_butterfly_vec_mix_rev v24, v25, v18, v19, v28, v29, v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3, v2, v3, v2, v3 do_butterfly_vec_top v26, v27, v16, v17, v30, v31, v0, v2, v3, v2, v3 oo_barrett v24, v25, v26, v27, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, #11, v0 add table, table, #256 trn_4x4 v28, v29, v30, v31, v16, v17, v18, v19 trn_4x4_2s4 v24, v25, v26, v27, v16, v17, v18, v19, src0, src1, q28, q29, q30, q31, #1*16, #1*16, #3*16, #3*16 sub counter, counter, #1 cbnz counter, _intt_bot_loop str q24, [src0, #0*16] str q25, [src1, #0*16] str q26, [src0, #2*16] str q27, [src1, #2*16] .unreq Q .unreq BarrettM .unreq src0 .unreq src1 .unreq table .unreq counter pop_all ret .align 2 .global PQCLEAN_MLKEM768_AARCH64__asm_intt_SIMD_top .global _PQCLEAN_MLKEM768_AARCH64__asm_intt_SIMD_top PQCLEAN_MLKEM768_AARCH64__asm_intt_SIMD_top: _PQCLEAN_MLKEM768_AARCH64__asm_intt_SIMD_top: push_all Q .req w20 BarrettM .req w21 invN .req w22 invN_f .req w23 src .req x0 table .req x1 counter .req x19 ldrsh Q, [x2, #0] ldrsh BarrettM, [x2, #8] ldr invN, [x2, #10] ldr invN_f, [x2, #14] mov v4.S[0], invN mov v4.S[1], invN_f ldr q0, [table, #0*16] mov v0.H[0], Q ldr q1, [table, #1*16] ldr q2, [table, #2*16] ldr q3, [table, #3*16] ldr q16, [src, # 8*32] ldr q17, [src, # 9*32] ldr q18, [src, #10*32] ldr q19, [src, #11*32] ldr q20, [src, #12*32] ldr q21, [src, #13*32] ldr q22, [src, #14*32] ldr q23, [src, #15*32] qo_butterfly_botll \ v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, \ src, \ q8, q9, q10, q11, \ #0*32, #1*32, #2*32, #3*32, \ src, \ q12, q13, q14, q15, \ #4*32, #5*32, #6*32, #7*32 qo_butterfly_mix_rev v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v0, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7 qo_butterfly_mix_rev v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v0, v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7 qo_butterfly_mix_rev v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v0, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3 qo_butterfly_mix_rev v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7 qo_butterfly_mix_rev v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, v14, 
v15, v0, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5 qo_butterfly_mix_rev v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, v14, v15, v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v0, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3 qo_butterfly_mix_rev v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v12, v13, v14, v15, v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3 qo_butterfly_topsl \ v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, \ src, \ q16, q17, q18, q19, \ #8*32, #9*32, #10*32, #11*32, \ src, \ q16, q17, q18, q19, \ #(16+8*32), #(16+9*32), #(16+10*32), #(16+11*32) qo_montgomery_mul_insl \ v8, v9, v10, v11, v28, v29, v30, v31, v0, v4, 1, 0, v4, 1, 0, v4, 1, 0, v4, 1, 0, \ src, \ q20, q21, q22, q23, \ #12*32, #13*32, #14*32, #15*32, \ src, \ q20, q21, q22, q23, \ #(16+12*32), #(16+13*32), #(16+14*32), #(16+15*32) qo_butterfly_botsl_mul \ v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, \ src, \ q8, q9, q10, q11, \ #0*32, #1*32, #2*32, #3*32, \ src, \ q8, q9, q10, q11, \ #(16+0*32), #(16+1*32), #(16+2*32), #(16+3*32), \ v12, v13, v14, v15, v24, v25, v26, v27, \ v0, v4, 1, 0, v4, 1, 0, v4, 1, 0, v4, 1, 0 str q12, [src, # 4*32] ldr q12, [src, #(16+ 4*32)] str q13, [src, # 5*32] ldr q13, [src, #(16+ 5*32)] str q14, [src, # 6*32] ldr q14, [src, #(16+ 6*32)] str q15, [src, # 7*32] ldr q15, [src, #(16+ 7*32)] qo_butterfly_mix_rev v16, v18, v20, v22, v28, v29, v30, v31, v17, v19, v21, v23, v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v0, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7, v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7 qo_butterfly_mix_rev v8, v10, v12, v14, v24, v25, v26, v27, v9, v11, v13, v15, v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v0, v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7 qo_butterfly_mix_rev v16, v17, v20, v21, v28, v29, v30, v31, v18, v19, v22, v23, v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v0, v1, 4, 5, v1, 4, 5, v1, 6, 7, v1, 6, 7, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3 qo_butterfly_mix_rev v8, v9, v12, v13, v24, v25, v26, v27, v10, v11, v14, v15, v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v0, v1, 0, 1, v1, 0, 1, v1, 2, 3, v1, 2, 3, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7 qo_butterfly_mix_rev v16, v17, v18, v19, v28, v29, v30, v31, v20, v21, v22, v23, v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, v14, v15, v0, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 6, 7, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5 qo_butterfly_mix_rev v8, v9, v10, v11, v24, v25, v26, v27, v12, v13, v14, v15, v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v0, v0, 4, 5, v0, 4, 5, v0, 4, 5, v0, 4, 5, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3 qo_butterfly_mix_rev v8, v9, v10, v11, v28, v29, v30, v31, v16, v17, v18, v19, v12, v13, v14, v15, v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3 qo_butterfly_tops \ v24, v25, v26, v27, v20, v21, v22, v23, v0, v4, 2, 3, v4, 2, 3, v4, 2, 3, v4, 2, 3, \ src, \ q16, q17, q18, q19, \ #(16+8*32), #(16+9*32), #(16+10*32), #(16+11*32) qo_montgomery_mul_ins \ v8, v9, v10, v11, v28, v29, v30, v31, v0, v4, 1, 0, v4, 1, 0, v4, 1, 0, v4, 1, 0, \ src, \ q20, q21, q22, q23, \ #(16+12*32), #(16+13*32), #(16+14*32), #(16+15*32) qo_montgomery_mul_ins \ v12, v13, v14, v15, v24, v25, v26, v27, v0, v4, 1, 0, v4, 1, 0, v4, 
1, 0, v4, 1, 0, \ src, \ q8, q9, q10, q11, \ #(16+0*32), #(16+1*32), #(16+2*32), #(16+3*32) str q12, [src, #(16+ 4*32)] str q13, [src, #(16+ 5*32)] str q14, [src, #(16+ 6*32)] str q15, [src, #(16+ 7*32)] .unreq Q .unreq BarrettM .unreq invN .unreq invN_f .unreq src .unreq counter pop_all ret
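`oo_barrett` bounds coefficients between layers with the sqrdmulh/srshr/mls idiom, parameterized by the `BarrettM` constant loaded from the table. A scalar model of that idiom, assuming BarrettM = round(2^26/q) = 20159 as in the usual ML-KEM formulation (the constant lives in the runtime table, not in this file, so treat the value and the names below as assumptions):

#include <stdint.h>

#define KYBER_Q 3329

/* sqrdmulh: rounding doubling multiply returning the high half
 * (saturation ignored in this scalar model). */
static int16_t sqrdmulh16(int16_t a, int16_t b)
{
    return (int16_t)((2 * (int32_t)a * b + (1 << 15)) >> 16);
}

/* Barrett reduction as composed by oo_barrett: t ~ round(a/q), a - t*q. */
static int16_t barrett_neon(int16_t a)
{
    const int16_t m = 20159;               /* assumed round(2^26 / q)  */
    int16_t t = sqrdmulh16(a, m);          /* ~ a * m / 2^15, rounded  */
    t = (int16_t)((t + (1 << 10)) >> 11);  /* srshr #11                */
    return (int16_t)(a - t * KYBER_Q);     /* mls                      */
}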
mktmansour/MKT-KSA-Geolocation-Security
23,888
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-768/aarch64/__asm_base_mul.S
/* * We offer * CC0 1.0 Universal or the following MIT License for this file. * You may freely choose one of them that applies. * * MIT License * * Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang * Copyright (c) 2023: Vincent Hwang * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "macros.inc" #include "params.h" .align 2 .global PQCLEAN_MLKEM768_AARCH64__asm_point_mul_extended .global _PQCLEAN_MLKEM768_AARCH64__asm_point_mul_extended PQCLEAN_MLKEM768_AARCH64__asm_point_mul_extended: _PQCLEAN_MLKEM768_AARCH64__asm_point_mul_extended: push_all Q .req w20 des .req x0 src1 .req x1 src2ex .req x2 counter .req x19 ldrsh Q, [x3] dup v28.8H, Q ldr q0, [src1, #0*16] ldr q1, [src1, #1*16] ldr q2, [src1, #2*16] ldr q3, [src1, #3*16] ldr q4, [src1, #4*16] ldr q5, [src1, #5*16] ldr q6, [src1, #6*16] ldr q7, [src1, #7*16] add src1, src1, #8*16 uzp2 v1.8H, v0.8H, v1.8H uzp2 v3.8H, v2.8H, v3.8H uzp2 v5.8H, v4.8H, v5.8H uzp2 v7.8H, v6.8H, v7.8H ldr q8, [src2ex, #0*16] ldr q10, [src2ex, #2*16] ldr q12, [src2ex, #4*16] ldr q14, [src2ex, #6*16] ldr q9, [src2ex, #1*16] ldr q11, [src2ex, #3*16] ldr q13, [src2ex, #5*16] ldr q15, [src2ex, #7*16] add src2ex, src2ex, #8*16 ldr q16, [src1, #0*16] sqrdmulh v0.8H, v1.8H, v8.8H ldr q17, [src1, #1*16] sqrdmulh v2.8H, v3.8H, v10.8H ldr q18, [src1, #2*16] sqrdmulh v4.8H, v5.8H, v12.8H ldr q19, [src1, #3*16] sqrdmulh v6.8H, v7.8H, v14.8H ldr q20, [src1, #4*16] mul v1.8H, v1.8H, v9.8H uzp2 v17.8H, v16.8H, v17.8H ldr q21, [src1, #5*16] mul v3.8H, v3.8H, v11.8H uzp2 v19.8H, v18.8H, v19.8H ldr q22, [src1, #6*16] mul v5.8H, v5.8H, v13.8H uzp2 v21.8H, v20.8H, v21.8H ldr q23, [src1, #7*16] mul v7.8H, v7.8H, v15.8H uzp2 v23.8H, v22.8H, v23.8H add src1, src1, #8*16 ldr q8, [src2ex, #0*16] mls v1.8H, v0.8H, v28.8H ldr q10, [src2ex, #2*16] mls v3.8H, v2.8H, v28.8H ldr q12, [src2ex, #4*16] mls v5.8H, v4.8H, v28.8H ldr q14, [src2ex, #6*16] mls v7.8H, v6.8H, v28.8H ldr q9, [src2ex, #1*16] str q1, [des, #0*16] ldr q11, [src2ex, #3*16] str q3, [des, #1*16] ldr q13, [src2ex, #5*16] str q5, [des, #2*16] ldr q15, [src2ex, #7*16] str q7, [des, #3*16] add des, des, #4*16 add src2ex, src2ex, #8*16 ldr q0, [src1, #0*16] sqrdmulh v16.8H, v17.8H, v8.8H ldr q1, [src1, #1*16] sqrdmulh v18.8H, v19.8H, v10.8H ldr q2, [src1, #2*16] sqrdmulh v20.8H, v21.8H, v12.8H ldr q3, [src1, #3*16] sqrdmulh v22.8H, v23.8H, v14.8H ldr q4, [src1, #4*16] mul v17.8H, v17.8H, v9.8H uzp2 v1.8H, v0.8H, v1.8H ldr q5, [src1, #5*16] mul v19.8H, v19.8H, v11.8H uzp2 
v3.8H, v2.8H, v3.8H ldr q6, [src1, #6*16] mul v21.8H, v21.8H, v13.8H uzp2 v5.8H, v4.8H, v5.8H ldr q7, [src1, #7*16] mul v23.8H, v23.8H, v15.8H uzp2 v7.8H, v6.8H, v7.8H add src1, src1, #8*16 ldr q8, [src2ex, #0*16] mls v17.8H, v16.8H, v28.8H ldr q10, [src2ex, #2*16] mls v19.8H, v18.8H, v28.8H ldr q12, [src2ex, #4*16] mls v21.8H, v20.8H, v28.8H ldr q14, [src2ex, #6*16] mls v23.8H, v22.8H, v28.8H ldr q9, [src2ex, #1*16] str q17, [des, #0*16] ldr q11, [src2ex, #3*16] str q19, [des, #1*16] ldr q13, [src2ex, #5*16] str q21, [des, #2*16] ldr q15, [src2ex, #7*16] str q23, [des, #3*16] add des, des, #4*16 add src2ex, src2ex, #8*16 ldr q16, [src1, #0*16] sqrdmulh v0.8H, v1.8H, v8.8H ldr q17, [src1, #1*16] sqrdmulh v2.8H, v3.8H, v10.8H ldr q18, [src1, #2*16] sqrdmulh v4.8H, v5.8H, v12.8H ldr q19, [src1, #3*16] sqrdmulh v6.8H, v7.8H, v14.8H ldr q20, [src1, #4*16] mul v1.8H, v1.8H, v9.8H uzp2 v17.8H, v16.8H, v17.8H ldr q21, [src1, #5*16] mul v3.8H, v3.8H, v11.8H uzp2 v19.8H, v18.8H, v19.8H ldr q22, [src1, #6*16] mul v5.8H, v5.8H, v13.8H uzp2 v21.8H, v20.8H, v21.8H ldr q23, [src1, #7*16] mul v7.8H, v7.8H, v15.8H uzp2 v23.8H, v22.8H, v23.8H add src1, src1, #8*16 ldr q8, [src2ex, #0*16] mls v1.8H, v0.8H, v28.8H ldr q10, [src2ex, #2*16] mls v3.8H, v2.8H, v28.8H ldr q12, [src2ex, #4*16] mls v5.8H, v4.8H, v28.8H ldr q14, [src2ex, #6*16] mls v7.8H, v6.8H, v28.8H ldr q9, [src2ex, #1*16] str q1, [des, #0*16] ldr q11, [src2ex, #3*16] str q3, [des, #1*16] ldr q13, [src2ex, #5*16] str q5, [des, #2*16] ldr q15, [src2ex, #7*16] str q7, [des, #3*16] add des, des, #4*16 add src2ex, src2ex, #8*16 sqrdmulh v16.8H, v17.8H, v8.8H sqrdmulh v18.8H, v19.8H, v10.8H sqrdmulh v20.8H, v21.8H, v12.8H sqrdmulh v22.8H, v23.8H, v14.8H mul v17.8H, v17.8H, v9.8H mul v19.8H, v19.8H, v11.8H mul v21.8H, v21.8H, v13.8H mul v23.8H, v23.8H, v15.8H mls v17.8H, v16.8H, v28.8H mls v19.8H, v18.8H, v28.8H mls v21.8H, v20.8H, v28.8H mls v23.8H, v22.8H, v28.8H str q17, [des, #0*16] str q19, [des, #1*16] str q21, [des, #2*16] str q23, [des, #3*16] add des, des, #4*16 .unreq Q .unreq des .unreq src1 .unreq src2ex .unreq counter pop_all ret .align 2 .global PQCLEAN_MLKEM768_AARCH64__asm_asymmetric_mul .global _PQCLEAN_MLKEM768_AARCH64__asm_asymmetric_mul PQCLEAN_MLKEM768_AARCH64__asm_asymmetric_mul: _PQCLEAN_MLKEM768_AARCH64__asm_asymmetric_mul: push_all des .req x11 src1_0 .req x0 src2_0 .req x1 src2asy_0 .req x2 src1_1 .req x4 src2_1 .req x5 src2asy_1 .req x6 src1_2 .req x8 src2_2 .req x9 src2asy_2 .req x10 src1_3 .req x12 src2_3 .req x13 src2asy_3 .req x14 counter .req x19 ldr s4, [x3] add des, x4, #0 add src1_1, src1_0, #512*1 add src2_1, src2_0, #512*1 add src2asy_1, src2asy_0, #256*1 #if KYBER_K > 2 add src1_2, src1_0, #512*2 add src2_2, src2_0, #512*2 add src2asy_2, src2asy_0, #256*2 #endif #if KYBER_K > 3 add src1_3, src1_0, #512*3 add src2_3, src2_0, #512*3 add src2asy_3, src2asy_0, #256*3 #endif ldr q20, [src1_0, #0*16] ldr q21, [src1_0, #1*16] ldr q22, [src2_0, #0*16] ldr q23, [src2_0, #1*16] add src1_0, src1_0, #32 add src2_0, src2_0, #32 uzp1 v0.8H, v20.8H, v21.8H uzp2 v1.8H, v20.8H, v21.8H uzp1 v2.8H, v22.8H, v23.8H uzp2 v3.8H, v22.8H, v23.8H ld1 {v28.8H}, [src2asy_0], #16 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, v21.8H 
smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H ld1 {v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, v22.8H, v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif // TODO:interleaving mov counter, #15 _asymmetric_mul_loop: ldr q20, [src1_0, #0*16] uzp1 v6.8H, v16.8H, v18.8H ldr q21, [src1_0, #1*16] uzp1 v7.8H, v17.8H, v19.8H ldr q22, [src2_0, #0*16] mul v6.8H, v6.8H, v4.H[1] ldr q23, [src2_0, #1*16] mul v7.8H, v7.8H, v4.H[1] add src1_0, src1_0, #32 add src2_0, src2_0, #32 smlal v16.4S, v6.4H, v4.H[0] uzp1 v0.8H, v20.8H, v21.8H smlal2 v18.4S, v6.8H, v4.H[0] uzp2 v1.8H, v20.8H, v21.8H smlal v17.4S, v7.4H, v4.H[0] uzp1 v2.8H, v22.8H, v23.8H smlal2 v19.4S, v7.8H, v4.H[0] uzp2 v3.8H, v22.8H, v23.8H ld1 {v28.8H}, [src2asy_0], #16 uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H st2 { v6.8H, v7.8H}, [des], #32 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, v21.8H smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H ld1 {v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, v22.8H, 
v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif sub counter, counter, #1 cbnz counter, _asymmetric_mul_loop uzp1 v6.8H, v16.8H, v18.8H uzp1 v7.8H, v17.8H, v19.8H mul v6.8H, v6.8H, v4.H[1] mul v7.8H, v7.8H, v4.H[1] smlal v16.4S, v6.4H, v4.H[0] smlal2 v18.4S, v6.8H, v4.H[0] smlal v17.4S, v7.4H, v4.H[0] smlal2 v19.4S, v7.8H, v4.H[0] uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H st2 { v6.8H, v7.8H}, [des], #32 .unreq des .unreq src1_0 .unreq src2_0 .unreq src2asy_0 .unreq src1_1 .unreq src2_1 .unreq src2asy_1 .unreq src1_2 .unreq src2_2 .unreq src2asy_2 .unreq src1_3 .unreq src2_3 .unreq src2asy_3 .unreq counter pop_all ret .align 2 .global PQCLEAN_MLKEM768_AARCH64__asm_asymmetric_mul_montgomery .global _PQCLEAN_MLKEM768_AARCH64__asm_asymmetric_mul_montgomery PQCLEAN_MLKEM768_AARCH64__asm_asymmetric_mul_montgomery: _PQCLEAN_MLKEM768_AARCH64__asm_asymmetric_mul_montgomery: push_all des .req x11 src1_0 .req x0 src2_0 .req x1 src2asy_0 .req x2 src1_1 .req x4 src2_1 .req x5 src2asy_1 .req x6 src1_2 .req x8 src2_2 .req x9 src2asy_2 .req x10 src1_3 .req x12 src2_3 .req x13 src2asy_3 .req x14 counter .req x19 ldr q4, [x3] add des, x4, #0 add src1_1, src1_0, #512*1 add src2_1, src2_0, #512*1 add src2asy_1, src2asy_0, #256*1 #if KYBER_K > 2 add src1_2, src1_0, #512*2 add src2_2, src2_0, #512*2 add src2asy_2, src2asy_0, #256*2 #endif #if KYBER_K > 3 add src1_3, src1_0, #512*3 add src2_3, src2_0, #512*3 add src2asy_3, src2asy_0, #256*3 #endif ldr q20, [src1_0, #0*16] ldr q21, [src1_0, #1*16] ldr q22, [src2_0, #0*16] ldr q23, [src2_0, #1*16] add src1_0, src1_0, #32 add src2_0, src2_0, #32 uzp1 v0.8H, v20.8H, v21.8H uzp2 v1.8H, v20.8H, v21.8H uzp1 v2.8H, v22.8H, v23.8H uzp2 v3.8H, v22.8H, v23.8H ld1 {v28.8H}, [src2asy_0], #16 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, v21.8H smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H ld1 
{v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, v22.8H, v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif mov counter, #15 _asymmetric_mul_montgomery_loop: uzp1 v6.8H, v16.8H, v18.8H uzp1 v7.8H, v17.8H, v19.8H mul v6.8H, v6.8H, v4.H[1] mul v7.8H, v7.8H, v4.H[1] ldr q20, [src1_0, #0*16] smlal v16.4S, v6.4H, v4.H[0] ldr q21, [src1_0, #1*16] smlal2 v18.4S, v6.8H, v4.H[0] ldr q22, [src2_0, #0*16] smlal v17.4S, v7.4H, v4.H[0] ldr q23, [src2_0, #1*16] smlal2 v19.4S, v7.8H, v4.H[0] add src1_0, src1_0, #32 add src2_0, src2_0, #32 uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H uzp1 v0.8H, v20.8H, v21.8H sqrdmulh v16.8H, v6.8H, v4.H[4] uzp2 v1.8H, v20.8H, v21.8H sqrdmulh v17.8H, v7.8H, v4.H[4] uzp1 v2.8H, v22.8H, v23.8H mul v6.8H, v6.8H, v4.H[5] uzp2 v3.8H, v22.8H, v23.8H mul v7.8H, v7.8H, v4.H[5] mls v6.8H, v16.8H, v4.H[0] mls v7.8H, v17.8H, v4.H[0] st2 { v6.8H, v7.8H}, [des], #32 ld1 {v28.8H}, [src2asy_0], #16 smull v16.4S, v0.4H, v2.4H ldr q20, [src1_1, #0*16] smull2 v18.4S, v0.8H, v2.8H ldr q21, [src1_1, #1*16] smull v17.4S, v0.4H, v3.4H ldr q22, [src2_1, #0*16] smull2 v19.4S, v0.8H, v3.8H ldr q23, [src2_1, #1*16] add src1_1, src1_1, #32 add src2_1, src2_1, #32 smlal v16.4S, v1.4H, v28.4H uzp1 v8.8H, v20.8H, v21.8H smlal2 v18.4S, v1.8H, v28.8H uzp2 v9.8H, v20.8H, v21.8H smlal v17.4S, v1.4H, v2.4H uzp1 v10.8H, v22.8H, v23.8H smlal2 v19.4S, v1.8H, v2.8H uzp2 v11.8H, v22.8H, v23.8H ld1 {v29.8H}, [src2asy_1], #16 #if KYBER_K > 2 smlal v16.4S, v8.4H, v10.4H ldr q20, [src1_2, #0*16] smlal2 v18.4S, v8.8H, v10.8H ldr q21, [src1_2, #1*16] smlal v17.4S, v8.4H, v11.4H ldr q22, [src2_2, #0*16] smlal2 v19.4S, v8.8H, v11.8H ldr q23, [src2_2, #1*16] add src1_2, src1_2, #32 add src2_2, src2_2, #32 smlal v16.4S, v9.4H, v29.4H uzp1 v12.8H, v20.8H, v21.8H smlal2 v18.4S, v9.8H, v29.8H uzp2 v13.8H, v20.8H, v21.8H 
smlal v17.4S, v9.4H, v10.4H uzp1 v14.8H, v22.8H, v23.8H smlal2 v19.4S, v9.8H, v10.8H uzp2 v15.8H, v22.8H, v23.8H ld1 {v30.8H}, [src2asy_2], #16 #if KYBER_K > 3 smlal v16.4S, v12.4H, v14.4H ldr q20, [src1_3, #0*16] smlal2 v18.4S, v12.8H, v14.8H ldr q21, [src1_3, #1*16] smlal v17.4S, v12.4H, v15.4H ldr q22, [src2_3, #0*16] smlal2 v19.4S, v12.8H, v15.8H ldr q23, [src2_3, #1*16] add src1_3, src1_3, #32 add src2_3, src2_3, #32 smlal v16.4S, v13.4H, v30.4H uzp1 v24.8H, v20.8H, v21.8H smlal2 v18.4S, v13.8H, v30.8H uzp2 v25.8H, v20.8H, v21.8H smlal v17.4S, v13.4H, v14.4H uzp1 v26.8H, v22.8H, v23.8H smlal2 v19.4S, v13.8H, v14.8H uzp2 v27.8H, v22.8H, v23.8H ld1 {v31.8H}, [src2asy_3], #16 smlal v16.4S, v24.4H, v26.4H smlal2 v18.4S, v24.8H, v26.8H smlal v17.4S, v24.4H, v27.4H smlal2 v19.4S, v24.8H, v27.8H smlal v16.4S, v25.4H, v31.4H smlal2 v18.4S, v25.8H, v31.8H smlal v17.4S, v25.4H, v26.4H smlal2 v19.4S, v25.8H, v26.8H #else smlal v16.4S, v12.4H, v14.4H smlal2 v18.4S, v12.8H, v14.8H smlal v17.4S, v12.4H, v15.4H smlal2 v19.4S, v12.8H, v15.8H smlal v16.4S, v13.4H, v30.4H smlal2 v18.4S, v13.8H, v30.8H smlal v17.4S, v13.4H, v14.4H smlal2 v19.4S, v13.8H, v14.8H #endif #else smlal v16.4S, v8.4H, v10.4H smlal2 v18.4S, v8.8H, v10.8H smlal v17.4S, v8.4H, v11.4H smlal2 v19.4S, v8.8H, v11.8H smlal v16.4S, v9.4H, v29.4H smlal2 v18.4S, v9.8H, v29.8H smlal v17.4S, v9.4H, v10.4H smlal2 v19.4S, v9.8H, v10.8H #endif sub counter, counter, #1 cbnz counter, _asymmetric_mul_montgomery_loop uzp1 v6.8H, v16.8H, v18.8H uzp1 v7.8H, v17.8H, v19.8H mul v6.8H, v6.8H, v4.H[1] mul v7.8H, v7.8H, v4.H[1] smlal v16.4S, v6.4H, v4.H[0] smlal2 v18.4S, v6.8H, v4.H[0] smlal v17.4S, v7.4H, v4.H[0] smlal2 v19.4S, v7.8H, v4.H[0] uzp2 v6.8H, v16.8H, v18.8H uzp2 v7.8H, v17.8H, v19.8H sqrdmulh v16.8H, v6.8H, v4.H[4] sqrdmulh v17.8H, v7.8H, v4.H[4] mul v6.8H, v6.8H, v4.H[5] mul v7.8H, v7.8H, v4.H[5] mls v6.8H, v16.8H, v4.H[0] mls v7.8H, v17.8H, v4.H[0] st2 { v6.8H, v7.8H}, [des], #32 .unreq des .unreq src1_0 .unreq src2_0 .unreq src2asy_0 .unreq src1_1 .unreq src2_1 .unreq src2asy_1 .unreq src1_2 .unreq src2_2 .unreq src2asy_2 .unreq src1_3 .unreq src2_3 .unreq src2asy_3 .unreq counter pop_all ret
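`_asm_point_mul_extended` caches the zeta-premultiplied odd coefficients of one operand, which is what the `src2asy` input of `_asm_asymmetric_mul` consumes: with that cache, each basemul of linear factors over the K polynomials of a module vector becomes plain 32-bit multiply-accumulate (the smull/smlal chains), with a single reduction at the end. A scalar model of one coefficient pair (names and the deferred reduction comment are illustrative; the assembly reduces via the uzp1/smlal tail instead):

#include <stdint.h>

#define KYBER_K 3            /* ml-kem-768 */

/* Accumulate sum over k of (a_k0 + a_k1*x)(b_k0 + b_k1*x) mod (x^2 - zeta),
 * with b_asy[k] = b[k][1] * zeta (mod q) precomputed. */
static void basemul_acc(int32_t acc[2],
                        const int16_t a[KYBER_K][2],
                        const int16_t b[KYBER_K][2],
                        const int16_t b_asy[KYBER_K])
{
    acc[0] = 0;
    acc[1] = 0;
    for (int k = 0; k < KYBER_K; k++) {
        acc[0] += (int32_t)a[k][0] * b[k][0] + (int32_t)a[k][1] * b_asy[k];
        acc[1] += (int32_t)a[k][0] * b[k][1] + (int32_t)a[k][1] * b[k][0];
    }
    /* a single Montgomery-style reduction of each 32-bit accumulator
     * follows in the assembly before the 16-bit results are stored */
}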
mktmansour/MKT-KSA-Geolocation-Security
12,798
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-768/aarch64/__asm_NTT.S
/* * We offer * CC0 1.0 Universal or the following MIT License for this file. * You may freely choose one of them that applies. * * MIT License * * Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang * Copyright (c) 2023: Vincent Hwang * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "macros.inc" .align 2 .global PQCLEAN_MLKEM768_AARCH64__asm_ntt_SIMD_top .global _PQCLEAN_MLKEM768_AARCH64__asm_ntt_SIMD_top PQCLEAN_MLKEM768_AARCH64__asm_ntt_SIMD_top: _PQCLEAN_MLKEM768_AARCH64__asm_ntt_SIMD_top: push_simd Q .req w8 src .req x0 table .req x1 counter .req x11 ldrsh Q, [x2, #0] ldr q0, [table, # 0*16] ldr q1, [table, # 1*16] ldr q2, [table, # 2*16] ldr q3, [table, # 3*16] mov v0.H[0], Q ldr q13, [src, # 9*32] ldr q15, [src, #11*32] ldr q17, [src, #13*32] ldr q19, [src, #15*32] qo_butterfly_topl \ v13, v15, v17, v19, v28, v29, v30, v31, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ src, \ q5, q7, q9, q11, \ #1*32, #3*32, #5*32, #7*32 qo_butterfly_mixll \ v5, v7, v9, v11, v13, v15, v17, v19, v28, v29, v30, v31, \ v12, v14, v16, v18, v20, v21, v22, v23, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ src, \ q12, q14, q16, q18, \ #8*32, #10*32, #12*32, #14*32, \ src, \ q4, q6, q8, q10, \ #0*32, #2*32, #4*32, #6*32 qo_butterfly_mix \ v4, v6, v8, v10, v12, v14, v16, v18, v20, v21, v22, v23, \ v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7 qo_butterfly_mix \ v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \ v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \ v0, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7 qo_butterfly_mix \ v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \ v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \ v0, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7 qo_butterfly_mix \ v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \ v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \ v0, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7 qo_butterfly_mix \ v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \ v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \ v0, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \ v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7 qo_butterfly_mixsls \ v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \ v13, v15, v17, v19, v20, v21, v22, v23, \ v0, \ v3, 0, 
1, v3, 2, 3, v3, 4, 5, v3, 6, 7, \ src, \ q5, q7, q9, q11, \ #1*32, #3*32, #5*32, #7*32, \ src, \ q5, q7, q9, q11, \ #(16+1*32), #(16+3*32), #(16+5*32), #(16+7*32), \ src, \ q4, q6, q8, q10, \ #0*32, #2*32, #4*32, #6*32 qo_butterfly_botsls \ v12, v14, v16, v18, v13, v15, v17, v19, v20, v21, v22, v23, \ src, \ q13, q15, q17, q19, \ #9*32, #11*32, #13*32, #15*32, \ src, \ q13, q15, q17, q19, \ #(16+9*32), #(16+11*32), #(16+13*32), #(16+15*32), \ src, \ q12, q14, q16, q18, \ #8*32, #10*32, #12*32, #14*32 qo_butterfly_topl \ v13, v15, v17, v19, v28, v29, v30, v31, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ src, \ q12, q14, q16, q18, \ #(16+8*32), #(16+10*32), #(16+12*32), #(16+14*32) qo_butterfly_mixl \ v5, v7, v9, v11, v13, v15, v17, v19, v28, v29, v30, v31, \ v12, v14, v16, v18, v20, v21, v22, v23, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ src, \ q4, q6, q8, q10, \ #(16+0*32), #(16+2*32), #(16+4*32), #(16+6*32) qo_butterfly_mix \ v4, v6, v8, v10, v12, v14, v16, v18, v20, v21, v22, v23, \ v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \ v0, \ v0, 2, 3, v0, 2, 3, v0, 2, 3, v0, 2, 3, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7 qo_butterfly_mix \ v5, v7, v13, v15, v9, v11, v17, v19, v28, v29, v30, v31, \ v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \ v0, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7 qo_butterfly_mix \ v4, v6, v12, v14, v8, v10, v16, v18, v20, v21, v22, v23, \ v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \ v0, \ v0, 4, 5, v0, 4, 5, v0, 6, 7, v0, 6, 7, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7 qo_butterfly_mix \ v5, v9, v13, v17, v7, v11, v15, v19, v28, v29, v30, v31, \ v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \ v0, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7 qo_butterfly_mix \ v4, v8, v12, v16, v6, v10, v14, v18, v20, v21, v22, v23, \ v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \ v0, \ v1, 0, 1, v1, 2, 3, v1, 4, 5, v1, 6, 7, \ v2, 0, 1, v2, 2, 3, v2, 4, 5, v2, 6, 7 qo_butterfly_mixss \ v4, v6, v8, v10, v5, v7, v9, v11, v28, v29, v30, v31, \ v13, v15, v17, v19, v20, v21, v22, v23, \ v0, \ v3, 0, 1, v3, 2, 3, v3, 4, 5, v3, 6, 7, \ src, \ q5, q7, q9, q11, \ #(16+1*32), #(16+3*32), #(16+5*32), #(16+7*32), \ src, \ q4, q6, q8, q10, \ #(16+0*32), #(16+2*32), #(16+4*32), #(16+6*32) qo_butterfly_botss \ v12, v14, v16, v18, v13, v15, v17, v19, v20, v21, v22, v23, \ src, \ q13, q15, q17, q19, \ #(16+9*32), #(16+11*32), #(16+13*32), #(16+15*32), \ src, \ q12, q14, q16, q18, \ #(16+8*32), #(16+10*32), #(16+12*32), #(16+14*32) .unreq Q .unreq src .unreq table .unreq counter pop_simd ret .align 2 .global PQCLEAN_MLKEM768_AARCH64__asm_ntt_SIMD_bot .global _PQCLEAN_MLKEM768_AARCH64__asm_ntt_SIMD_bot PQCLEAN_MLKEM768_AARCH64__asm_ntt_SIMD_bot: _PQCLEAN_MLKEM768_AARCH64__asm_ntt_SIMD_bot: push_simd Q .req w8 BarrettM .req w9 src0 .req x0 src1 .req x1 table .req x10 counter .req x11 ldrsh Q, [x2, #0] ldrsh BarrettM, [x2, #8] add table, x1, #64 add src0, x0, #256*0 add src1, x0, #256*1 mov v0.H[0], Q mov v0.H[1], BarrettM ldr q28, [src0, # 1*16] ldr q29, [src1, # 1*16] ldr q30, [src0, # 3*16] ldr q31, [src1, # 3*16] trn_4x4_l3 v28, v29, v30, v31, v20, v21, v22, v23, table, q1, q2, q3, #1*16, #2*16, #3*16 do_butterfly_vec_top_2ltrn_4x4 \ v29, v31, v18, v19, v0, v2, v3, v2, v3, \ src0, src1, \ q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16, \ v24, v25, v26, v27, v20, v21, v22, v23 do_butterfly_vec_mixl \ v25, v27, v29, v31, v18, v19, \ v28, v30, v16, v17, \ 
v0, \ v2, v3, v2, v3, \ table, \ q4, q5, q6, q7, #4*16, #5*16, #6*16, #7*16 do_butterfly_vec_mixl \ v24, v26, v28, v30, v16, v17, \ v27, v31, v18, v19, \ v0, \ v4, v5, v6, v7, \ table, \ q8, q9, q10, q11, #8*16, #9*16, #10*16, #11*16 do_butterfly_vec_mixl \ v25, v29, v27, v31, v18, v19, \ v26, v30, v16, v17, \ v0, \ v4, v5, v6, v7, \ table, \ q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16 add table, table, #256 do_butterfly_vec_mix v24, v28, v26, v30, v16, v17, v24, v26, v25, v27, v18, v19, v0, v4, v5, v6, v7, v8, v9, v10, v11 do_butterfly_vec_mix v24, v26, v25, v27, v18, v19, v28, v30, v29, v31, v16, v17, v0, v8, v9, v10, v11, v12, v13, v14, v15 do_butterfly_vec_bot_oo_barrett_trn_4x4 \ v28, v30, v29, v31, v16, v17, \ v24, v25, v26, v27, v20, v21, v22, v23, v28, v29, v30, v31, v16, v17, v18, v19, v0, #11, v0 trn_4x4_2s4 v28, v29, v30, v31, v16, v17, v18, v19, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16 mov counter, #3 _ntt_bot_loop: str q28, [src0, # 1*16] ldr q28, [src0, #(64+1*16)] str q29, [src1, # 1*16] ldr q29, [src1, #(64+1*16)] str q30, [src0, # 3*16] ldr q30, [src0, #(64+3*16)] str q31, [src1, # 3*16] ldr q31, [src1, #(64+3*16)] add src0, src0, #64 add src1, src1, #64 trn_4x4_l3 v28, v29, v30, v31, v20, v21, v22, v23, table, q1, q2, q3, #1*16, #2*16, #3*16 do_butterfly_vec_top_2ltrn_4x4 \ v29, v31, v18, v19, v0, v2, v3, v2, v3, \ src0, src1, \ q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16, \ v24, v25, v26, v27, v20, v21, v22, v23 do_butterfly_vec_mixl \ v25, v27, v29, v31, v18, v19, \ v28, v30, v16, v17, \ v0, \ v2, v3, v2, v3, \ table, \ q4, q5, q6, q7, #4*16, #5*16, #6*16, #7*16 do_butterfly_vec_mixl \ v24, v26, v28, v30, v16, v17, \ v27, v31, v18, v19, \ v0, \ v4, v5, v6, v7, \ table, \ q8, q9, q10, q11, #8*16, #9*16, #10*16, #11*16 do_butterfly_vec_mixl \ v25, v29, v27, v31, v18, v19, \ v26, v30, v16, v17, \ v0, \ v4, v5, v6, v7, \ table, \ q12, q13, q14, q15, #12*16, #13*16, #14*16, #15*16 add table, table, #256 do_butterfly_vec_mix v24, v28, v26, v30, v16, v17, v24, v26, v25, v27, v18, v19, v0, v4, v5, v6, v7, v8, v9, v10, v11 do_butterfly_vec_mix v24, v26, v25, v27, v18, v19, v28, v30, v29, v31, v16, v17, v0, v8, v9, v10, v11, v12, v13, v14, v15 do_butterfly_vec_bot_oo_barrett_trn_4x4 \ v28, v30, v29, v31, v16, v17, \ v24, v25, v26, v27, v20, v21, v22, v23, v28, v29, v30, v31, v16, v17, v18, v19, v0, #11, v0 trn_4x4_2s4 v28, v29, v30, v31, v16, v17, v18, v19, src0, src1, q24, q25, q26, q27, #0*16, #0*16, #2*16, #2*16 sub counter, counter, #1 cbnz counter, _ntt_bot_loop str q28, [src0, # 1*16] str q29, [src1, # 1*16] str q30, [src0, # 3*16] str q31, [src1, # 3*16] add src0, src0, #64 add src1, src1, #64 .unreq Q .unreq BarrettM .unreq src0 .unreq src1 .unreq table .unreq counter pop_simd ret